Compare commits


203 Commits
0.2.0 ... 0.7.5

Author SHA1 Message Date
Greg Heartsfield
e8557d421b build: bump version to 0.7.5 2022-12-16 17:21:00 -06:00
Greg Heartsfield
7ca9c864f2 improvement: DB pool logging shows used connections directly 2022-12-16 17:01:49 -06:00
Greg Heartsfield
838aafd079 improvement: consistent log messages for client/sub ids 2022-12-16 15:22:27 -06:00
Greg Heartsfield
e554b10ac2 improvement: tweak sub/sql logging for slow queries 2022-12-16 14:55:45 -06:00
Greg Heartsfield
b0bfaa48fc improvement: ignore duplicate REQ messages 2022-12-16 14:37:02 -06:00
Greg Heartsfield
2e9b1b6ba7 docs: comment reason for force_no_match 2022-12-16 14:35:21 -06:00
Greg Heartsfield
4d9012d94c improvement: upgrade docker builder and base images 2022-12-16 14:33:08 -06:00
Greg Heartsfield
ffe7aac066 improvement: upgrade multiple dependencies
Updating aho-corasick v0.7.19 -> v0.7.20
Updating async-trait v0.1.58 -> v0.1.59
Updating axum v0.5.17 -> v0.6.1
Updating axum-core v0.2.9 -> v0.3.0
Updating bytes v1.2.1 -> v1.3.0
Updating cc v1.0.76 -> v1.0.78
Updating crossbeam-utils v0.8.12 -> v0.8.14
Updating cxx v1.0.82 -> v1.0.83
Updating cxx-build v1.0.82 -> v1.0.83
Updating cxxbridge-flags v1.0.82 -> v1.0.83
Updating cxxbridge-macro v1.0.82 -> v1.0.83
Updating flate2 v1.0.24 -> v1.0.25
Updating libc v0.2.137 -> v0.2.138
Updating matchit v0.5.0 -> v0.7.0
Updating miniz_oxide v0.5.4 -> v0.6.2
Updating openssl v0.10.42 -> v0.10.44
Updating openssl-sys v0.9.77 -> v0.9.79
Updating parking_lot_core v0.9.4 -> v0.9.5
Updating pest v2.4.1 -> v2.5.1
Updating pest_derive v2.4.1 -> v2.5.1
Updating pest_generator v2.4.1 -> v2.5.1
Updating pest_meta v2.4.1 -> v2.5.1
Updating prost v0.11.2 -> v0.11.3
Adding rustversion v1.0.9
Updating serde v1.0.147 -> v1.0.150
Updating serde_derive v1.0.147 -> v1.0.150
Updating serde_json v1.0.88 -> v1.0.89
Updating sha-1 v0.10.0 -> v0.10.1
Updating syn v1.0.103 -> v1.0.105
Updating tokio v1.22.0 -> v1.23.0
Updating tokio-macros v1.8.0 -> v1.8.2
Updating toml v0.5.9 -> v0.5.10
Updating tonic v0.8.2 -> v0.8.3
Updating tower-http v0.3.4 -> v0.3.5
Updating typenum v1.15.0 -> v1.16.0
2022-12-16 11:17:05 -06:00
Greg Heartsfield
f9695bd0a9 fix: db schema version updates correctly for v9 2022-12-16 10:01:49 -06:00
Greg Heartsfield
7c4bf5cc8f fix: run db migration for v9 2022-12-16 08:21:00 -06:00
Greg Heartsfield
e2de162931 feat: only show SQL in logs for slow queries unless tracing 2022-12-16 08:17:39 -06:00
Greg Heartsfield
4f606615eb perf: indexing improvement 2022-12-16 08:16:49 -06:00
Greg Heartsfield
84a58ebbcd build: bump version to 0.7.3 2022-12-16 06:32:00 -06:00
Greg Heartsfield
c48e45686d perf: schema updates for better event indexing 2022-12-15 08:48:35 -06:00
Greg Heartsfield
bbe359364a refactor: clippy warnings 2022-12-15 08:43:36 -06:00
Greg Heartsfield
9e9c494367 perf: significant query speedup when using kinds.
fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/51
2022-12-14 21:04:49 -06:00
Greg Heartsfield
5fa24bc9f1 fix: send EOSE when ids list is empty in subscriptions
Fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/50
2022-11-19 10:35:00 -06:00
Greg Heartsfield
4de7490d97 fix: send EOSE when authors list is empty in subscriptions
Fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/49
2022-11-19 10:00:38 -06:00
Greg Heartsfield
d0f63dc66e docs: update container instructions for rootless podman 2022-11-19 09:32:26 -06:00
Greg Heartsfield
06078648c8 build: bump version to 0.7.2 2022-11-19 07:55:52 -06:00
Greg Heartsfield
cc0fcc5d66 docs: add Cargo package metadata 2022-11-19 07:32:17 -06:00
Greg Heartsfield
dfb2096653 improvement: build auditable binary in docker 2022-11-19 07:11:39 -06:00
Greg Heartsfield
486508d192 improvement: upgrade multiple dependencies
Updating crates.io index
Updating cc v1.0.74 -> v1.0.76
Updating chrono v0.4.22 -> v0.4.23
Updating cxx v1.0.80 -> v1.0.82
Updating cxx-build v1.0.80 -> v1.0.82
Updating cxxbridge-flags v1.0.80 -> v1.0.82
Updating cxxbridge-macro v1.0.80 -> v1.0.82
Updating digest v0.10.5 -> v0.10.6
Updating hyper v0.14.22 -> v0.14.23
Updating indexmap v1.9.1 -> v1.9.2
Updating regex v1.6.0 -> v1.7.0
Updating regex-syntax v0.6.27 -> v0.6.28
Updating serde_json v1.0.87 -> v1.0.88
Updating tokio v1.21.2 -> v1.22.0
Updating uuid v1.2.1 -> v1.2.2
2022-11-19 06:52:06 -06:00
Greg Heartsfield
84b43c144b improvement: use locked cargo packages to build container images 2022-11-19 06:29:13 -06:00
Greg Heartsfield
110500bb46 feat(NIP-20): advertise support for NIP-20 in relay info/readme 2022-11-12 09:22:43 -06:00
Greg Heartsfield
83f6b11de7 refactor: clippy fix 2022-11-12 09:22:24 -06:00
William Casarin
6d1244434b feat(NIP-20): improve invalid event error messages
Instead of returning a NOTICE for invalid events, return an `OK false`
command result with the reason the event is invalid.
2022-11-12 09:13:22 -06:00
William Casarin
5a91419d34 feat(NIP-20): send command results to clients
When submitting events to relays, clients currently have no way to know
if an event was successfully committed to the database. This NIP
introduces the concept of command results, which are like NOTICEs
except that they indicate whether an event was accepted or rejected.

A command result is a JSON array with the following structure,
returned when an event is successfully saved to the database or
rejected:

	["OK", <event_id>, <true|false>, <message>]

nip20: https://github.com/nostr-protocol/nips/pull/62
2022-11-12 09:12:35 -06:00
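
A minimal sketch of constructing such a command result with `serde_json`; the event id and reason here are illustrative, not taken from the relay's code:

```rust
use serde_json::json;

// NIP-20 command result: ["OK", <event_id>, <true|false>, <message>]
fn command_result(event_id: &str, accepted: bool, message: &str) -> String {
    json!(["OK", event_id, accepted, message]).to_string()
}
```
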
William Casarin
7adc5c9af7 perf: dont create intermediate vecs when matching subs
Avoid creating intermediate vectors when matching subscriptions. We can
just iterate over the hashmap directly.
2022-11-09 07:30:43 -06:00
Greg Heartsfield
9dd4571bee refactor: reduce level of some common DB logs 2022-11-06 13:49:32 -06:00
Greg Heartsfield
9db5a26b9c refactor: more consistent logging messages 2022-11-05 16:11:20 -05:00
Greg Heartsfield
ac345b5744 refactor: do not quote server-generated client id in logs 2022-11-05 15:59:39 -05:00
Greg Heartsfield
675662c7fb improvement: upgrade docker builder and base images 2022-11-05 13:24:17 -05:00
Greg Heartsfield
505b0cb71f improvement: upgrade multiple dependencies
Updating anyhow v1.0.65 -> v1.0.66
Updating async-trait v0.1.57 -> v0.1.58
Updating axum v0.5.16 -> v0.5.17
Updating axum-core v0.2.8 -> v0.2.9
Updating base64 v0.13.0 -> v0.13.1
Updating bumpalo v3.11.0 -> v3.11.1
Updating cc v1.0.73 -> v1.0.74
Updating cxx v1.0.79 -> v1.0.80
Updating cxx-build v1.0.79 -> v1.0.80
Updating cxxbridge-flags v1.0.79 -> v1.0.80
Updating cxxbridge-macro v1.0.79 -> v1.0.80
Updating futures v0.3.24 -> v0.3.25
Updating futures-channel v0.3.24 -> v0.3.25
Updating futures-core v0.3.24 -> v0.3.25
Updating futures-executor v0.3.24 -> v0.3.25
Updating futures-io v0.3.24 -> v0.3.25
Updating futures-macro v0.3.24 -> v0.3.25
Updating futures-sink v0.3.24 -> v0.3.25
Updating futures-task v0.3.24 -> v0.3.25
Updating futures-util v0.3.24 -> v0.3.25
Updating getrandom v0.2.7 -> v0.2.8
Updating h2 v0.3.14 -> v0.3.15
Updating hyper v0.14.20 -> v0.14.22
Updating iana-time-zone v0.1.51 -> v0.1.53
Updating libc v0.2.135 -> v0.2.137
Updating mio v0.8.4 -> v0.8.5
Updating native-tls v0.2.10 -> v0.2.11
Updating num_cpus v1.13.1 -> v1.14.0
Updating once_cell v1.15.0 -> v1.16.0
Updating openssl-sys v0.9.76 -> v0.9.77
Updating parking_lot_core v0.9.3 -> v0.9.4
Updating pest v2.4.0 -> v2.4.1
Updating pest_derive v2.4.0 -> v2.4.1
Updating pest_generator v2.4.0 -> v2.4.1
Updating pest_meta v2.4.0 -> v2.4.1
Updating pkg-config v0.3.25 -> v0.3.26
Updating ppv-lite86 v0.2.16 -> v0.2.17
Updating prost v0.11.0 -> v0.11.2
Updating prost-derive v0.11.0 -> v0.11.2
Updating prost-types v0.11.1 -> v0.11.2
Updating serde v1.0.145 -> v1.0.147
Updating serde_derive v1.0.145 -> v1.0.147
Updating serde_json v1.0.86 -> v1.0.87
Updating syn v1.0.102 -> v1.0.103
  Adding windows-sys v0.42.0
  Adding windows_aarch64_gnullvm v0.42.0
  Adding windows_aarch64_msvc v0.42.0
  Adding windows_i686_gnu v0.42.0
  Adding windows_i686_msvc v0.42.0
  Adding windows_x86_64_gnu v0.42.0
  Adding windows_x86_64_gnullvm v0.42.0
  Adding windows_x86_64_msvc v0.42.0
2022-11-05 10:59:03 -05:00
Greg Heartsfield
e8aa450802 build: bump version to 0.7.1 2022-11-05 10:35:38 -05:00
Greg Heartsfield
5a8860bb09 feat: log user-agent if present 2022-11-05 10:29:25 -05:00
Greg Heartsfield
11e43eccf9 refactor: add unit to ping_interval config 2022-11-05 07:42:08 -05:00
William Casarin
50577b2dfa feat: add network.ping_interval setting
Add a ping interval setting that allows you to customize the websocket
ping interval. The default of 5 minutes may be too high for some proxy
servers that disconnect connections that are held open for too long.
2022-11-05 07:40:28 -05:00
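
A rough sketch of how such a configurable ping could be driven with tokio; the `ping_interval` value and the `send_ping` callback are assumptions, not the relay's actual interfaces:

```rust
use std::time::Duration;
use tokio::time::interval;

// Sketch: wake up every `ping_interval` seconds and send a websocket ping.
async fn ping_loop(ping_interval: u64, mut send_ping: impl FnMut()) {
    let mut ticker = interval(Duration::from_secs(ping_interval));
    loop {
        ticker.tick().await;
        send_ping();
    }
}
```
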
William Casarin
a6cb6f8486 refactor: rename get_header_remote_ip -> get_header_string
This function has nothing to do with remote ips!
2022-11-05 07:37:18 -05:00
Greg Heartsfield
ae5bf98d87 feat: retrieve client IP from header in config.toml
If the config.toml has defined a HTTP header to look for a remote IP,
that will be logged.  Otherwise, the socket address IP will be used.

closes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/47
2022-11-04 18:05:01 -05:00
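
A sketch of the fallback logic described above, assuming hyper's `HeaderMap` for request headers; the function and parameter names are illustrative:

```rust
use std::net::SocketAddr;
use hyper::HeaderMap;

// Prefer the configured header (e.g. "x-forwarded-for") for the client IP;
// fall back to the socket address when it is absent or unreadable.
fn client_ip(headers: &HeaderMap, remote_ip_header: Option<&str>, addr: SocketAddr) -> String {
    remote_ip_header
        .and_then(|name| headers.get(name))
        .and_then(|v| v.to_str().ok())
        .map(str::to_string)
        .unwrap_or_else(|| addr.ip().to_string())
}
```
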
William Casarin
1cf9d719f0 feat: look for proxied ip headers
This enables support for using the proxied IP from cloudflare. The damus
relay is behind cloudflare, so to get accurate remote ip logging we need
to look at the headers instead of the socket address.

Signed-off-by: William Casarin <jb55@jb55.com>
2022-11-04 17:09:28 -05:00
William Casarin
311f4b5283 refactor: switch new connections to debug log
These are pretty spammy on busy relays. I've been using the info log to
monitor spam attacks, and these are the least useful info log.

Leave the "stopping connection" log because it at least provides useful
sent/received information.

Signed-off-by: William Casarin <jb55@jb55.com>
2022-11-04 07:59:53 -05:00
Greg Heartsfield
14b5a51e3a fix: log ephemeral events after send 2022-11-04 07:55:38 -05:00
Greg Heartsfield
8ecce3f566 feat: show client IP in logs 2022-11-02 18:33:44 -05:00
Greg Heartsfield
caffbbbede build: bump version to 0.7.0 2022-10-16 15:42:11 -05:00
Greg Heartsfield
81045ad3d0 improvement: upgrade multiple dependencies
Updating anyhow v1.0.64 -> v1.0.65
  Adding codespan-reporting v0.11.1
Updating const_format v0.2.28 -> v0.2.30
Updating const_format_proc_macros v0.2.22 -> v0.2.29
Updating crossbeam-utils v0.8.11 -> v0.8.12
  Adding cxx v1.0.79
  Adding cxx-build v1.0.79
  Adding cxxbridge-flags v1.0.79
  Adding cxxbridge-macro v1.0.79
Updating digest v0.10.3 -> v0.10.5
Updating hdrhistogram v7.5.1 -> v7.5.2
Updating iana-time-zone v0.1.50 -> v0.1.51
  Adding iana-time-zone-haiku v0.1.1
Updating itertools v0.10.3 -> v0.10.5
Updating itoa v1.0.3 -> v1.0.4
Updating js-sys v0.3.59 -> v0.3.60
Updating libc v0.2.132 -> v0.2.135
  Adding link-cplusplus v1.0.7
Updating lock_api v0.4.8 -> v0.4.9
Updating once_cell v1.14.0 -> v1.15.0
Updating openssl v0.10.41 -> v0.10.42
Updating openssl-sys v0.9.75 -> v0.9.76
Updating pest v2.3.0 -> v2.4.0
Updating pest_derive v2.3.0 -> v2.4.0
Updating pest_generator v2.3.0 -> v2.4.0
Updating pest_meta v2.3.0 -> v2.4.0
Updating proc-macro2 v1.0.43 -> v1.0.47
Updating rand_core v0.6.3 -> v0.6.4
Updating raw-cpuid v10.5.0 -> v10.6.0
  Adding scratch v1.0.2
Updating serde v1.0.144 -> v1.0.145
Updating serde_derive v1.0.144 -> v1.0.145
Updating serde_json v1.0.85 -> v1.0.86
  Adding sha1 v0.10.5
Updating smallvec v1.9.0 -> v1.10.0
Updating syn v1.0.99 -> v1.0.102
  Adding termcolor v1.1.3
Updating thiserror v1.0.34 -> v1.0.37
Updating thiserror-impl v1.0.34 -> v1.0.37
Updating tokio v1.21.0 -> v1.21.2
Updating tokio-stream v0.1.9 -> v0.1.11
Updating tonic v0.8.1 -> v0.8.2
Updating tower-layer v0.3.1 -> v0.3.2
Updating tracing v0.1.36 -> v0.1.37
Updating tracing-attributes v0.1.22 -> v0.1.23
Updating tracing-core v0.1.29 -> v0.1.30
Updating tracing-subscriber v0.3.15 -> v0.3.16
Updating unicode-ident v1.0.3 -> v1.0.5
Updating unicode-normalization v0.1.21 -> v0.1.22
  Adding unicode-width v0.1.10
Updating uuid v1.1.2 -> v1.2.1
Updating wasm-bindgen v0.2.82 -> v0.2.83
Updating wasm-bindgen-backend v0.2.82 -> v0.2.83
Updating wasm-bindgen-macro v0.2.82 -> v0.2.83
Updating wasm-bindgen-macro-support v0.2.82 -> v0.2.83
Updating wasm-bindgen-shared v0.2.82 -> v0.2.83
Updating web-sys v0.3.59 -> v0.3.60
  Adding winapi-util v0.1.5
2022-10-16 15:33:11 -05:00
Greg Heartsfield
72f8a1aa5c feat(NIP-26): allow searches for delegated public keys
Implements core NIP-26 delegated event functionality.  Events can
include a `delegation` tag that provides a signature and restrictions
on which events can be delegated.

Notable points on the implementation so far:

* Schema has been upgraded to include an index and new column.
* Basic rune parsing/evaluation to implement the example event in the
  NIP, but no more.
* No special logic for deletion.
* No migration logic for determining delegated authors for
  already-stored events.
2022-10-16 15:25:06 -05:00
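
A sketch of extracting the NIP-26 `delegation` tag (`["delegation", <delegator pubkey>, <conditions>, <sig>]`) from an event, assuming tags are held as `Vec<Vec<String>>`; this is illustrative, not the relay's parser:

```rust
// Returns (delegator pubkey, conditions, signature) if a well-formed
// delegation tag is present.
fn delegation_tag(tags: &[Vec<String>]) -> Option<(&str, &str, &str)> {
    tags.iter().find_map(|t| match t.as_slice() {
        [kind, delegator, conditions, sig] if kind == "delegation" => {
            Some((delegator.as_str(), conditions.as_str(), sig.as_str()))
        }
        _ => None,
    })
}
```
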
Greg Heartsfield
274c61bb72 improvement: upgrade docker images for base & builder 2022-10-13 18:42:55 -05:00
Greg Heartsfield
6eeefbcc4c feat: quick script for making non-x86 Dockerfiles 2022-10-13 18:35:33 -05:00
Greg Heartsfield
3e8adf978f refactor: move db migrations into isolated functions 2022-10-09 08:54:03 -05:00
Greg Heartsfield
2af5f9fbe8 fix: correct schema upgrade logic (and refactor)
Schema upgrades were buggy from 4->5 (the v5 upgrade would be
skipped).  This change also refactors the logic slightly so that
future additions can be clearer (no need for if and else-if
combinations).
2022-10-09 08:24:01 -05:00
Greg Heartsfield
2739e49362 fix: correct future schema version detection 2022-10-08 13:15:48 -05:00
Greg Heartsfield
f9693f7ac3 fix(NIP-9): hide events received after their deletions
fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/40
2022-10-08 12:12:41 -05:00
Greg Heartsfield
8a63d88b0b fix: prevent deletion of deletion events 2022-10-08 08:02:16 -05:00
Greg Heartsfield
a4df9445b6 test: improve port selection 2022-09-28 07:55:06 -05:00
Greg Heartsfield
92da9d71f8 feat: handle and log TERM signals 2022-09-28 07:20:31 -05:00
Greg Heartsfield
6633f8b472 feat: replace logging with tracing 2022-09-28 07:19:59 -05:00
Greg Heartsfield
93dfed0a87 refactor: misc clippy suggestions 2022-09-24 19:28:02 -05:00
Greg Heartsfield
bef7ca7e27 refactor: misc clippy suggestions 2022-09-24 09:19:16 -05:00
Greg Heartsfield
a98708ba47 refactor: misc clippy suggestions 2022-09-24 09:01:09 -05:00
Greg Heartsfield
ccf9b8d47b refactor: remove unnecessary return types 2022-09-24 08:39:41 -05:00
Greg Heartsfield
8fa58de49a refactor: clippy suggestions 2022-09-24 08:30:22 -05:00
Greg Heartsfield
480c5e4e58 docs: un-link NIP-22 note 2022-09-19 19:34:11 -05:00
dzdidi
5bd00f9107 docs: add refs for nostr-protocol organization
Signed-off-by: dzdidi <deniszalessky@gmail.com>
2022-09-19 19:26:36 -05:00
Greg Heartsfield
36b9f628c7 test: check for relay health after startup 2022-09-17 16:02:57 -05:00
Greg Heartsfield
baeb77af99 test: dynamically find open port for test relay 2022-09-17 14:36:05 -05:00
Greg Heartsfield
29b1e8ce58 refactor: move common test code into module 2022-09-17 12:37:49 -05:00
Greg Heartsfield
786a354776 test: simple integration test to start and stop relay 2022-09-11 12:54:24 -05:00
Greg Heartsfield
4fa8616c73 feat: enable use of tokio-console with diagnostics.tracing setting
View real-time tokio diagnostics by setting the configuration option
"diagnostics.tracing" to true.
2022-09-11 12:44:45 -05:00
Greg Heartsfield
74802522c2 improvement: do not create NIP-05 thread if feature is disabled 2022-09-11 11:01:36 -05:00
Greg Heartsfield
9ce5057af8 improvement: better log formatting 2022-09-11 10:22:01 -05:00
Greg Heartsfield
217429f538 build: add release flags, save artifacts 2022-09-11 10:21:29 -05:00
Greg Heartsfield
62a9548c27 docs: show build status for master branch only 2022-09-10 22:53:41 -05:00
Greg Heartsfield
c24dce8177 docs: add build status indicator 2022-09-10 22:48:23 -05:00
Greg Heartsfield
3503cf05ed build: add sr.ht build manifest 2022-09-10 22:43:56 -05:00
Greg Heartsfield
8738e5baa9 improvement: upgrade multiple dependencies
Updating aho-corasick v0.7.18 -> v0.7.19
Updating block-buffer v0.10.2 -> v0.10.3
Updating cpufeatures v0.2.4 -> v0.2.5
Updating form_urlencoded v1.0.1 -> v1.1.0
Updating idna v0.2.3 -> v0.3.0
Removing matches v0.1.9
Updating percent-encoding v2.1.0 -> v2.2.0
Updating thiserror v1.0.33 -> v1.0.34
Updating thiserror-impl v1.0.33 -> v1.0.34
Updating tokio-util v0.7.3 -> v0.7.4
Updating ucd-trie v0.1.4 -> v0.1.5
Updating url v2.2.2 -> v2.3.1
2022-09-10 22:42:52 -05:00
Greg Heartsfield
78da92ccca feat: advertise support for NIP-09 and NIP-12 in relay info
NIP-01 prefix search and NIP-12 generic tags are no longer marked as
experimental.

NIP-11 relay info advertises NIP-09 event deletion and NIP-12 generic
tag search support.
2022-09-10 20:45:09 -05:00
Greg Heartsfield
72f1c19b21 feat(NIP-22): advertise support for event created_at limits
The `reject_future_limits` option can now be disabled, and is by
default.

NIP-11 advertises support for created_at limits.

The message for future-dated events has been modified to be closer to
the recommended example in the NIP.
2022-09-10 20:40:10 -05:00
Greg Heartsfield
283967f8cc docs: reference NIP-28 channel 2022-09-10 19:45:23 -05:00
Greg Heartsfield
08b011ad07 feat: ensure that WAL is truncated after checkpoint 2022-09-10 19:18:57 -05:00
Greg Heartsfield
2b03f11e5e refactor: remove global/singleton settings object 2022-09-06 06:12:07 -05:00
Greg Heartsfield
e48bae10e6 feat: support in-memory SQLite database 2022-09-06 06:06:01 -05:00
Greg Heartsfield
8774416b92 refactor: move nostr server into library 2022-09-06 05:56:04 -05:00
Greg Heartsfield
59933ce25e build: add pre-commit config 2022-09-06 05:44:22 -05:00
Greg Heartsfield
1b9f364e15 chore: rustfmt 2022-09-02 12:38:31 -05:00
Greg Heartsfield
4d983dd1e0 improvement: upgrade uuid dependency 2022-09-02 12:37:11 -05:00
Greg Heartsfield
11c33582ef improvement: remove useless carats from Cargo.toml deps 2022-09-02 12:35:02 -05:00
Greg Heartsfield
a754477a02 improvement: misc refactorings (clippy) 2022-09-02 12:26:00 -05:00
Greg Heartsfield
a843eaa939 improvement: db.rs from clippy 2022-09-02 10:30:51 -05:00
Greg Heartsfield
03a130b0b8 improvement: simplify config builder (clippy) 2022-09-02 10:18:16 -05:00
Greg Heartsfield
9124f4540a improvement: upgrade multiple dependencies
Updating cpufeatures v0.2.3 -> v0.2.4
Updating dashmap v5.3.4 -> v5.4.0
Updating futures v0.3.23 -> v0.3.24
Updating futures-channel v0.3.23 -> v0.3.24
Updating futures-core v0.3.23 -> v0.3.24
Updating futures-executor v0.3.23 -> v0.3.24
Updating futures-io v0.3.23 -> v0.3.24
Updating futures-macro v0.3.23 -> v0.3.24
Updating futures-sink v0.3.23 -> v0.3.24
Updating futures-task v0.3.23 -> v0.3.24
Updating futures-util v0.3.23 -> v0.3.24
Updating httparse v1.7.1 -> v1.8.0
Updating lock_api v0.4.7 -> v0.4.8
Updating once_cell v1.13.1 -> v1.14.0
Updating pest v2.2.1 -> v2.3.0
Updating pest_derive v2.2.1 -> v2.3.0
Updating pest_generator v2.2.1 -> v2.3.0
Updating pest_meta v2.2.1 -> v2.3.0
Updating serde v1.0.143 -> v1.0.144
Updating serde_derive v1.0.143 -> v1.0.144
Updating serde_json v1.0.83 -> v1.0.85
Updating socket2 v0.4.4 -> v0.4.7
Updating thiserror v1.0.32 -> v1.0.33
Updating thiserror-impl v1.0.32 -> v1.0.33
Updating tokio v1.20.1 -> v1.21.0
2022-09-02 10:08:14 -05:00
slaninas
77892b2064 fix: syntax error 2022-08-22 05:12:52 -07:00
Greg Heartsfield
4fe6191aa3 chore: formatting 2022-08-21 09:51:34 -07:00
Greg Heartsfield
79a982e3ef improvement: send NOTICE for too-large messages 2022-08-21 09:28:31 -07:00
Greg Heartsfield
01d81db617 improvement: log client id for subscription removal 2022-08-21 09:11:38 -07:00
Greg Heartsfield
e6fef37d4e chore: rustfmt 2022-08-21 09:10:19 -07:00
plantimals
4bbfd77fc1 docs: add NGINX configuration example
resolves https://github.com/scsibug/nostr-rs-relay/issues/12
2022-08-20 09:35:01 -07:00
Greg Heartsfield
8da6f6555a build: bump version to 0.6.2 2022-08-18 17:52:16 -07:00
Greg Heartsfield
5bcc63bd56 improvement: upgrade multiple dependencies
Updating async-trait v0.1.56 -> v0.1.57
Removing block-buffer v0.7.3
Removing block-padding v0.1.5
Updating bumpalo v3.10.0 -> v3.11.0
Removing byte-tools v0.3.1
Updating bytes v1.1.0 -> v1.2.1
Updating cpufeatures v0.2.2 -> v0.2.3
Updating crossbeam-utils v0.8.10 -> v0.8.11
Updating crypto-common v0.1.4 -> v0.1.6
Removing digest v0.8.1
Removing fake-simd v0.1.2
Updating fastrand v1.7.0 -> v1.8.0
Updating futures v0.3.21 -> v0.3.23
Updating futures-channel v0.3.21 -> v0.3.23
Updating futures-core v0.3.21 -> v0.3.23
Updating futures-executor v0.3.21 -> v0.3.23
Updating futures-io v0.3.21 -> v0.3.23
Updating futures-macro v0.3.21 -> v0.3.23
Updating futures-sink v0.3.21 -> v0.3.23
Updating futures-task v0.3.21 -> v0.3.23
Updating futures-util v0.3.21 -> v0.3.23
Removing generic-array v0.12.4
Removing generic-array v0.14.5
  Adding generic-array v0.14.6
Updating h2 v0.3.13 -> v0.3.14
Updating hashbrown v0.12.1 -> v0.12.3
Updating hyper v0.14.19 -> v0.14.20
Updating itoa v1.0.2 -> v1.0.3
Updating js-sys v0.3.58 -> v0.3.59
Updating libc v0.2.126 -> v0.2.132
Removing maplit v1.0.2
Updating once_cell v1.12.1 -> v1.13.1
Removing opaque-debug v0.2.3
Updating openssl v0.10.40 -> v0.10.41
Updating openssl-sys v0.9.74 -> v0.9.75
Updating pest v2.1.3 -> v2.2.1
Updating pest_derive v2.1.0 -> v2.2.1
Updating pest_generator v2.1.3 -> v2.2.1
Updating pest_meta v2.1.3 -> v2.2.1
Updating proc-macro2 v1.0.40 -> v1.0.43
Updating quote v1.0.20 -> v1.0.21
Updating raw-cpuid v10.3.0 -> v10.5.0
Updating redox_syscall v0.2.13 -> v0.2.16
Updating regex v1.5.6 -> v1.6.0
Updating regex-syntax v0.6.26 -> v0.6.27
Updating ryu v1.0.10 -> v1.0.11
Updating security-framework v2.6.1 -> v2.7.0
Updating serde v1.0.138 -> v1.0.143
Updating serde_derive v1.0.138 -> v1.0.143
Updating serde_json v1.0.82 -> v1.0.83
Removing sha-1 v0.8.2
Updating slab v0.4.6 -> v0.4.7
Updating syn v1.0.98 -> v1.0.99
Updating thiserror v1.0.31 -> v1.0.32
Updating thiserror-impl v1.0.31 -> v1.0.32
Updating tokio v1.19.2 -> v1.20.1
Updating tokio-tungstenite v0.17.1 -> v0.17.2
Updating tracing v0.1.35 -> v0.1.36
Updating tracing-core v0.1.28 -> v0.1.29
Updating tungstenite v0.17.2 -> v0.17.3
Updating ucd-trie v0.1.3 -> v0.1.4
Updating unicode-ident v1.0.1 -> v1.0.3
Updating wasm-bindgen v0.2.81 -> v0.2.82
Updating wasm-bindgen-backend v0.2.81 -> v0.2.82
Updating wasm-bindgen-macro v0.2.81 -> v0.2.82
Updating wasm-bindgen-macro-support v0.2.81 -> v0.2.82
Updating wasm-bindgen-shared v0.2.81 -> v0.2.82
Updating web-sys v0.3.58 -> v0.3.59
2022-08-18 17:21:53 -07:00
Greg Heartsfield
035cf34673 fix(NIP-12): correctly search for mixed-case hex-like tags
Only lowercase, even-length tag values are stored as binary BLOBs.
Previously, an error prevented search results from being returned
if the tag value was mixed-case and could be interpreted as hex.

A new database migration has been created to repair the `tag` table
for existing relays.

fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/37
2022-08-17 16:34:11 -07:00
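
A sketch of the storage rule the fix depends on; this is a hypothetical helper, not necessarily the relay's exact code:

```rust
// A tag value is a candidate for binary (BLOB) storage only if it is
// non-empty, even-length, lowercase hex; anything else stays a string.
fn is_lower_hex(s: &str) -> bool {
    !s.is_empty()
        && s.len() % 2 == 0
        && s.chars().all(|c| matches!(c, '0'..='9' | 'a'..='f'))
}
```
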
Greg Heartsfield
be8170342e fix(NIP-12): multi-tag searches returns correct results
The logic of the generated SQL was incorrect, causing multiple tag
searches (as defined in NIP-12) to produce no results.

fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/36
2022-08-11 22:16:10 -07:00
Greg Heartsfield
0a3b15f41f fix(NIP-11): Add CORS header and content type for main page 2022-08-11 19:33:17 -07:00
Kirill Kovalenko
2b4b17dbda fix: windows compilation with bundled sqlite3
Using 'bundled' is recommended by
https://github.com/rusqlite/rusqlite#usage to avoid common build
issues
2022-08-07 10:35:36 -05:00
Greg Heartsfield
5058d98ad6 fix(NIP-12): only allow single-char tag filters 2022-08-07 10:15:36 -05:00
Greg Heartsfield
f4ecd43708 build: bump version to 0.6.1 2022-07-04 17:41:16 -05:00
Greg Heartsfield
a8f465fdc8 improvement: upgrade docker base images (and specify explicit repository) 2022-07-04 17:35:17 -05:00
Greg Heartsfield
1c14adc766 fix(NIP-01): allow limits on a per-filter basis
The original implementation applied the subscription limit to the
entire query, instead of the specific filter.  Now, each filter gets
its own query limit.  When a limit is applied, the most recent N
events are returned; otherwise the default is to return the earliest
matching events, in order.
2022-07-04 17:25:32 -05:00
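
A sketch of the per-filter behavior in SQL terms; the table and column names are assumed for illustration:

```rust
// With a limit: newest N matches; without one: all matches, oldest first.
fn filter_query(where_clause: &str, limit: Option<u64>) -> String {
    match limit {
        Some(n) => format!(
            "SELECT * FROM event WHERE {} ORDER BY created_at DESC LIMIT {}",
            where_clause, n
        ),
        None => format!(
            "SELECT * FROM event WHERE {} ORDER BY created_at ASC",
            where_clause
        ),
    }
}
```
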
Greg Heartsfield
e894a86566 docs: NIP-15, NIP-16 feature notes in README 2022-07-04 13:10:48 -05:00
Greg Heartsfield
bedc378624 improvement: upgrade multiple dependencies
Updating async-trait v0.1.53 -> v0.1.56
Updating bumpalo v3.9.1 -> v3.10.0
Updating crossbeam-utils v0.8.8 -> v0.8.10
Updating crypto-common v0.1.3 -> v0.1.4
Updating getrandom v0.2.6 -> v0.2.7
Updating http v0.2.7 -> v0.2.8
Updating indexmap v1.8.2 -> v1.9.1
Updating js-sys v0.3.57 -> v0.3.58
Updating linked-hash-map v0.5.4 -> v0.5.6
Updating mio v0.8.3 -> v0.8.4
Updating once_cell v1.12.0 -> v1.12.1
Updating openssl-sys v0.9.73 -> v0.9.74
Removing parking_lot v0.11.2
Removing parking_lot_core v0.8.5
Updating proc-macro2 v1.0.39 -> v1.0.40
Updating quote v1.0.18 -> v1.0.20
Updating r2d2 v0.8.9 -> v0.8.10
Updating ron v0.7.0 -> v0.7.1
Updating serde v1.0.137 -> v1.0.138
Updating serde_derive v1.0.137 -> v1.0.138
Updating serde_json v1.0.81 -> v1.0.82
Updating smallvec v1.8.0 -> v1.9.0
Updating syn v1.0.95 -> v1.0.98
Updating tokio v1.18.2 -> v1.19.2
Updating tokio-macros v1.7.0 -> v1.8.0
Updating tokio-util v0.7.2 -> v0.7.3
Updating tower-service v0.3.1 -> v0.3.2
Updating tracing v0.1.34 -> v0.1.35
Removing tracing-attributes v0.1.21
Updating tracing-core v0.1.26 -> v0.1.28
Updating unicode-ident v1.0.0 -> v1.0.1
Updating unicode-normalization v0.1.19 -> v0.1.21
Updating wasm-bindgen v0.2.80 -> v0.2.81
Updating wasm-bindgen-backend v0.2.80 -> v0.2.81
Updating wasm-bindgen-macro v0.2.80 -> v0.2.81
Updating wasm-bindgen-macro-support v0.2.80 -> v0.2.81
Updating wasm-bindgen-shared v0.2.80 -> v0.2.81
Updating web-sys v0.3.57 -> v0.3.58
2022-07-04 12:56:10 -05:00
Greg Heartsfield
e1c2a6b758 improvement: upgrade docker base image 2022-05-30 21:53:46 -05:00
Greg Heartsfield
990bb656e8 improvement: upgrade multiple dependencies
Cargo updated the following dependencies:

Updating dashmap v5.3.3 -> v5.3.4
Updating http-body v0.4.4 -> v0.4.5
Updating hyper v0.14.18 -> v0.14.19
Updating indexmap v1.8.1 -> v1.8.2
Updating itoa v1.0.1 -> v1.0.2
Updating libc v0.2.125 -> v0.2.126
Updating once_cell v1.10.0 -> v1.12.0
Updating parking_lot v0.12.0 -> v0.12.1
Updating proc-macro2 v1.0.38 -> v1.0.39
Updating regex v1.5.5 -> v1.5.6
Updating regex-syntax v0.6.25 -> v0.6.26
Updating ryu v1.0.9 -> v1.0.10
Updating schannel v0.1.19 -> v0.1.20
Updating scheduled-thread-pool v0.2.5 -> v0.2.6
Updating syn v1.0.93 -> v1.0.95
Updating tokio-util v0.7.1 -> v0.7.2

Adding unicode-ident v1.0.0

Removing unicode-xid v0.2.3
2022-05-30 21:47:24 -05:00
Semisol
168cfc3b26 feat(NIP-16): Implement NIP16
NIP16 introduces a replaceable and ephemeral event range:
[10000..20000) for replaceable and [20000..30000) for
ephemeral.
2022-05-30 21:43:06 -05:00
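
The half-open kind ranges above are easy to express as predicates; a minimal sketch:

```rust
// NIP-16 event treatment ranges.
fn is_replaceable(kind: u64) -> bool {
    (10_000..20_000).contains(&kind)
}

fn is_ephemeral(kind: u64) -> bool {
    (20_000..30_000).contains(&kind)
}
```
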
Semisol
a36ad378f6 feat(NIP-15): Implement NIP15
NIP15 sends an EOSE notice to clients after all stored events are sent
to allow loading indicators and other use cases.
2022-05-30 21:43:00 -05:00
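
The notice itself is a small JSON array; a sketch with `serde_json`, where `sub_id` is whatever subscription id the client supplied:

```rust
use serde_json::json;

// End-of-stored-events notice for a subscription.
fn eose_message(sub_id: &str) -> String {
    json!(["EOSE", sub_id]).to_string()
}
```
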
Greg Heartsfield
538d139ebf improvement: upgrade docker base image 2022-05-10 21:24:22 -05:00
Greg Heartsfield
23f7730fea build: bump version to 0.6.0 2022-05-10 21:19:21 -05:00
Greg Heartsfield
8aa1256254 improvement: upgrade multiple dependencies 2022-05-10 17:07:18 -05:00
Greg Heartsfield
9ed3391b46 fix(NIP-09): correct WHERE clause for event deletion 2022-05-10 16:50:52 -05:00
William Casarin
4ad483090e feat(NIP-01): Implement limit
This was quickly sneaked in by fiatjaf per my request[0]; it makes many
queries more efficient and allows for paging when combined with until.

It is a bit weird to have multiple limits on each filter... for now we
just choose any or the last limit seen.

[0]: a4aea5337f

Signed-off-by: William Casarin <jb55@jb55.com>
2022-05-10 16:47:56 -05:00
Greg Heartsfield
9b351aab9b docs: update devel discussion link 2022-02-28 17:19:24 -06:00
Greg Heartsfield
597749890e improvement: remove unnecessary event logging 2022-02-27 19:30:48 -06:00
Greg Heartsfield
1d499cf12b feat: handle NIP-09 for deletion events 2022-02-27 11:35:23 -06:00
Greg Heartsfield
ed3a6b9692 refactor: simplify NOTICE messages 2022-02-26 17:34:58 -06:00
Greg Heartsfield
048199e30b build: bump version to 0.5.2 2022-02-26 11:22:16 -06:00
Greg Heartsfield
414e83f696 refactor: import cleanup for config 2022-02-26 11:16:12 -06:00
Greg Heartsfield
225c8f762e improvement: upgrade dependencies; config, tungstenite, tokio 2022-02-26 09:55:12 -06:00
Greg Heartsfield
887fc28ab2 fix: until filters in subscriptions now used 2022-02-26 09:15:45 -06:00
Greg Heartsfield
294d3b99c3 fix: correct imports for test cases 2022-02-26 09:07:07 -06:00
Greg Heartsfield
53990672ae improvement: move db pool operations closer to query, do not panic on failure 2022-02-23 16:38:16 -06:00
Greg Heartsfield
9c1b21cbfe improvement: more granular perf logging for SQL queries 2022-02-21 09:03:05 -06:00
Greg Heartsfield
2f63417646 improvement: better logging for connection resets 2022-02-21 08:57:07 -06:00
Greg Heartsfield
3b25160852 fix: abort on connection IO errors 2022-02-21 08:50:46 -06:00
Greg Heartsfield
34ad549cde fix: update event buffer size comment in config 2022-02-20 11:46:24 -06:00
Greg Heartsfield
f8b1fe5035 docs: line up comments with code 2022-02-17 16:18:05 -06:00
Greg Heartsfield
f2001dc34a build: bump version to 0.5.1 2022-02-13 09:38:45 -06:00
Greg Heartsfield
b593001229 fix: remove setting from example config 2022-02-13 09:37:05 -06:00
Greg Heartsfield
5913b9f87a feat: send notices when authorization checks fail 2022-02-13 09:35:54 -06:00
Greg Heartsfield
77f35f9f43 feat: server-side pings and disconnects 2022-02-12 16:57:26 -06:00
Greg Heartsfield
9e06cc9482 improvement: better error messages on parse failures 2022-02-12 16:33:29 -06:00
Greg Heartsfield
e66fa4ac42 refactor: remove unnecessary Option wrapping 2022-02-12 16:29:27 -06:00
Greg Heartsfield
99e117f620 improvement: better handling of out-of-protocol messages 2022-02-12 16:26:55 -06:00
Greg Heartsfield
8250e00f05 fix: remove protostream module, and missing NOTICE 2022-02-12 16:22:12 -06:00
Greg Heartsfield
c9f87ec563 docs: NIP-05 feature note in README 2022-02-12 16:19:46 -06:00
Greg Heartsfield
ceaa01e8b4 fix: removed manual nostr stream, so websocket pings work 2022-02-12 16:19:10 -06:00
Greg Heartsfield
bc68cd0c74 build: bump version to 0.5.0 2022-02-12 14:10:44 -06:00
Greg Heartsfield
97589006fa improvement: upgrade dependencies 2022-02-12 14:10:03 -06:00
Greg Heartsfield
e31d0729f2 chore: comment cleanup 2022-02-12 13:49:52 -06:00
Greg Heartsfield
89d96e7ccd improvement: upgraded database schema to drop legacy tables
Database schema is upgraded to version 5.  Legacy event and pubkey
tables are dropped, and indexes are added for NIP-05 verification.
2022-02-12 13:47:03 -06:00
Greg Heartsfield
7056aae227 refactor: create schema module 2022-02-12 09:58:42 -06:00
Greg Heartsfield
753df47443 refactor: create utils/hexrange utility modules 2022-02-12 09:29:38 -06:00
Greg Heartsfield
26a0ce2b32 docs: function/struct comments 2022-02-12 09:29:35 -06:00
Greg Heartsfield
fa66a0265e docs: module headers 2022-02-12 09:29:31 -06:00
Greg Heartsfield
234a8ba0ac feat: limit event publishing to NIP-05 verified users
This adds a new configurable feature to restrict event publishing to
only users with NIP-05 verified metadata.  Domains can be whitelisted
or blacklisted.  Verification expiration and schedules are
configurable.

This upgrades the database to add a table for tracking verification
records.
2022-02-12 09:29:25 -06:00
Greg Heartsfield
f679fa0893 build: bump version to 0.4.2 2022-01-30 15:19:41 -06:00
Greg Heartsfield
4cc313fa2d fix: cleanup database connections with same name
When a large number of subscriptions was created with identical names,
no signal was sent over the abandon-read channel.  This eventually
led to resource exhaustion.
2022-01-30 15:14:02 -06:00
Greg Heartsfield
6502f7dcd7 fix: do not panic when validating events with malformed pubkeys 2022-01-29 13:19:34 -06:00
Greg Heartsfield
6ca3e3ffea build: bump version to 0.4.1 2022-01-26 21:48:44 -06:00
Greg Heartsfield
49c668a07c improvement: upgrade dependency (h2) 2022-01-26 21:48:11 -06:00
Greg Heartsfield
98c6fa6f39 feat: allow whitelisting of pubkeys for new events
This adds a configuration option, `authorization.pubkey_whitelist`,
which is an array of pubkeys that are allowed to publish events on
this relay.
2022-01-26 21:39:03 -06:00
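
A sketch of the check this option implies; names are assumed, and an unset whitelist admits everyone:

```rust
use std::collections::HashSet;

// Accept an event only if its author is whitelisted, when a whitelist exists.
fn author_allowed(whitelist: Option<&HashSet<String>>, pubkey: &str) -> bool {
    whitelist.map_or(true, |set| set.contains(pubkey))
}
```
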
Greg Heartsfield
452bbbb0e5 docs: update feature list (NIP-12, prefix search) 2022-01-26 07:24:04 -06:00
Greg Heartsfield
ee0de6f875 improvement: clearer and less verbose database logging 2022-01-25 21:42:43 -06:00
Greg Heartsfield
699489ebaf build: bump version to 0.4.0 2022-01-25 20:56:00 -06:00
Greg Heartsfield
af9da65f71 improvement: upgrade dependencies 2022-01-25 20:55:29 -06:00
Greg Heartsfield
a72eaec3b8 fix: never display hidden events 2022-01-25 20:48:46 -06:00
Greg Heartsfield
f1206e76f2 feat: database reader connection pooling
Added connection pooling for queries, as well as basic configuration
options for min/max connections.
2022-01-25 20:39:24 -06:00
Greg Heartsfield
af453548ee feat: allow author and event id prefix search
This is an experimental non-NIP feature that allows a subscription
filter to include a prefix for authors and events.
2022-01-25 18:23:08 -06:00
Greg Heartsfield
df251c821c docs: updated discord invite link 2022-01-25 07:43:15 -06:00
Greg Heartsfield
2d28a95ff7 feat: allow arbitrary tag queries
This is an experimental feature, outside of any NIP, that demonstrates
generic tag queries.

Instead of limiting subscription filters to just querying only "e" or
"p" tags (via `#e` or `#p` attributes), any tag can be queried.

As an example, consider an event which uses a tag "url".  With this
modification, a subscription filter could add a top-level field
"#url", with an array of strings as the value.  Exact matches would be
returned.

A NIP is forthcoming to formalize this.
2022-01-22 21:29:15 -06:00
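
A sketch of what such a filter could look like on the wire, built with `serde_json`; the `#url` field follows the commit message, while the value is illustrative:

```rust
use serde_json::{json, Value};

// Subscription filter matching events that carry a "url" tag
// with this exact value.
fn example_filter() -> Value {
    json!({ "#url": ["https://example.com/"] })
}
```
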
Greg Heartsfield
8c93ef5bc2 docs: provide public docker hub link 2022-01-20 22:02:42 -06:00
Greg Heartsfield
1c0fc1326d docs: add timeout for reverse-proxy example 2022-01-19 21:19:12 -06:00
Raj
179928378e refactor: add strictly typed tags
* Add custom error variant

This can be useful for propagating errors that do not conform to the
available variants, and for converting other errors into `crate::Error`
without an explicit conversion defined, via `error.to_string()`.

* Implement `Tag` and define protocol serialization

A `Tag` structure has been implemented with dedicated field types, and
custom serde serialization is derived to map the structure to the
current protocol JSON array as per NIP-01.

This adds compile- and run-time type checking to ensure that malformed
string data is never stored or processed. With strictly typed fields and
custom serde derivation, these checks can be done at serialization time,
saving work in the internal handling of the actual data.

Tests for possible data violations are added, and give good examples of
the kinds of errors it will throw in different cases.

* Use String for URL
2022-01-19 07:42:58 -06:00
Raj
c605d75bb4 docs: update readme to include the new discord server 2022-01-17 08:35:13 -06:00
Greg Heartsfield
81e4e2b892 feat: add supported NIPs (2, 11) to relay info 2022-01-16 08:37:21 -06:00
Greg Heartsfield
6f166433b5 fix: test failures 2022-01-16 08:36:52 -06:00
Greg Heartsfield
030b64de62 feat: replace email with contact field in relay info.
This finalizes the NIP-11 spec implementation.

Fixes https://todo.sr.ht/~gheartsfield/nostr-rs-relay/21.
2022-01-16 08:34:19 -06:00
Greg Heartsfield
c7eadb1154 Add feature list to README 2022-01-16 08:16:42 -06:00
Greg Heartsfield
62dc77369d docs: rename example relay server 2022-01-15 11:43:12 -06:00
Greg Heartsfield
24587435ca docs: reverse proxy example 2022-01-15 11:41:31 -06:00
Greg Heartsfield
a3124ccea4 improvement: better sql error handling 2022-01-15 09:42:53 -06:00
Greg Heartsfield
4e51e61d16 improvement: display rate limit messages max once per sec 2022-01-15 09:42:17 -06:00
Raj
5c8390bbe0 fix: fix some test failures 2022-01-14 14:27:12 -06:00
Greg Heartsfield
da7968efef fix: restore working websocket message size configuration options 2022-01-05 17:41:12 -05:00
Greg Heartsfield
7037555516 improvement: add indexed tag queries 2022-01-05 17:33:53 -05:00
Greg Heartsfield
19ed990c57 refactor: fix clippy errors for relay info response 2022-01-05 10:10:44 -05:00
Greg Heartsfield
d78bbfc290 build: bump version to 0.3.3 2022-01-03 22:07:15 -05:00
Greg Heartsfield
2924da88bc feat: incorporated improvements from NIP-11 discussion
Change descr to description.  Add `id` for websocket URL.  Use
integers for supported NIPs instead of strings.  Top-level is object,
instead of the array before.
2022-01-03 22:03:30 -05:00
Greg Heartsfield
3024e9fba4 build: bump version to 0.3.2 2022-01-03 18:43:17 -05:00
Greg Heartsfield
d3da4eb009 feat: implementation of proposed NIP-11 (server metadata) 2022-01-03 18:42:24 -05:00
Greg Heartsfield
19637d612e build: bump version to 0.3.1 2022-01-01 19:26:15 -06:00
Greg Heartsfield
afc9a0096a improvement: logging failed queries and timing 2022-01-01 19:25:09 -06:00
Greg Heartsfield
3d56262386 build: bump version to 0.3.0 2022-01-01 18:40:57 -06:00
Greg Heartsfield
6673fcfd11 feat: implement multi-valued filter searching
NIP-01 now uses arrays instead of scalars.

Fixes https://todo.sr.ht/~gheartsfield/nostr-rs-relay/17
2022-01-01 18:38:52 -06:00
Greg Heartsfield
b5da3fa2b0 docs: link to docker hub 2022-01-01 12:27:09 -06:00
Greg Heartsfield
850957213e build: bump version to 0.2.3 2022-01-01 09:13:13 -06:00
Greg Heartsfield
1aa5a5458d improvement: event signature validation is 100x faster
Switched to the latest (git) release of secp256k1, which has a more
efficient verification-only context for Schnorr signatures.  Switched
to a single pre-instantiated instance of the verifier.
2022-01-01 09:08:19 -06:00
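
A sketch of the pattern described: the secp256k1 crate does provide `Secp256k1::verification_only()`, though the static-instance wiring shown here is illustrative:

```rust
use lazy_static::lazy_static;
use secp256k1::{Secp256k1, VerifyOnly};

lazy_static! {
    // One pre-instantiated, verification-only context, reused for
    // every signature check instead of building a full context per event.
    static ref SECP: Secp256k1<VerifyOnly> = Secp256k1::verification_only();
}
```
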
Greg Heartsfield
620e227699 fix: connection issues with Firefox
This adds Hyper, and a 200 response code.  Prior to this, Firefox
would fail to connect.  There is also a text document displayed at the
root URL to indicate this is a Nostr relay.

Fixes https://todo.sr.ht/~gheartsfield/nostr-rs-relay/15
2022-01-01 08:11:20 -06:00
Greg Heartsfield
14e59ed278 build: bump version to 0.2.2 2021-12-31 16:34:52 -06:00
Greg Heartsfield
5ad383f257 fix: incorrect logic on empty filters for hidden events 2021-12-31 16:34:10 -06:00
Greg Heartsfield
9710ea27aa build: bump version to 0.2.1 2021-12-31 15:38:58 -06:00
Greg Heartsfield
783a6e1042 docs: fix docker examples 2021-12-31 15:28:26 -06:00
Greg Heartsfield
4171a8870e feat: reject events that are too large
A new configuration setting controls the maximum size of event
messages; the relay sends a notice to the client if a message exceeds it.

Fixes https://todo.sr.ht/~gheartsfield/nostr-rs-relay/14
2021-12-31 15:19:35 -06:00
Greg Heartsfield
8f3891c781 docs: docker and config updates 2021-12-31 14:08:04 -06:00
Greg Heartsfield
415d32299b fix: docker run references the correct database file 2021-12-31 14:05:11 -06:00
Greg Heartsfield
5a19a8876f feat: allow database directory configuration
Adds configuration options for the database directory, either on the
command line (via `--db dir-name`) or in the config.toml file.

Fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/13
2021-12-31 11:51:57 -06:00
32 changed files with 6467 additions and 1196 deletions

.build.yml (new file)

@@ -0,0 +1,19 @@
image: fedora/latest
arch: x86_64
artifacts:
  - nostr-rs-relay/target/release/nostr-rs-relay
environment:
  RUST_LOG: debug
packages:
  - cargo
  - sqlite-devel
sources:
  - https://git.sr.ht/~gheartsfield/nostr-rs-relay/
shell: false
tasks:
  - build: |
      cd nostr-rs-relay
      cargo build --release
  - test: |
      cd nostr-rs-relay
      cargo test --release

.cargo/config.toml (new file)

@@ -0,0 +1,2 @@
[build]
rustflags = ["--cfg", "tokio_unstable"]

.pre-commit-config.yaml (new file)

@@ -0,0 +1,16 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.3.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-added-large-files
  - repo: https://github.com/doublify/pre-commit-rust
    rev: v1.0
    hooks:
      - id: fmt
      - id: cargo-check
      - id: clippy

Cargo.lock (generated; diff suppressed because it is too large)

Cargo.toml

@@ -1,25 +1,46 @@
 [package]
 name = "nostr-rs-relay"
-version = "0.2.0"
+version = "0.7.5"
 edition = "2021"
+authors = ["Greg Heartsfield <scsibug@imap.cc>"]
+description = "A relay implementation for the Nostr protocol"
+readme = "README.md"
+homepage = "https://sr.ht/~gheartsfield/nostr-rs-relay/"
+repository = "https://git.sr.ht/~gheartsfield/nostr-rs-relay"
+license = "MIT"
+keywords = ["nostr", "server"]
+categories = ["network-programming", "web-programming"]

 [dependencies]
-log = "^0.4"
-env_logger = "^0.9"
-tokio = { version = "^1.14", features = ["full"] }
-futures = "^0.3"
-futures-util = "^0.3"
-tokio-tungstenite = "^0.16"
-tungstenite = "^0.16"
-thiserror = "^1"
-uuid = { version = "^0.8", features = ["v4"] }
-config = { version = "0.11", features = ["toml"] }
-bitcoin_hashes = { version = "^0.9", features = ["serde"] }
-secp256k1 = { version = "^0.20", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] }
-serde = { version = "^1.0", features = ["derive"] }
-serde_json = "^1.0"
-hex = "^0.4"
-rusqlite = "^0.26"
-lazy_static = "^1.4"
-governor = "^0.4"
-nonzero_ext = "^0.3"
+tracing = "0.1.36"
+tracing-subscriber = "0.2.0"
+tokio = { version = "1", features = ["full", "tracing", "signal"] }
+console-subscriber = "0.1.8"
+futures = "0.3"
+futures-util = "0.3"
+tokio-tungstenite = "0.17"
+tungstenite = "0.17"
+thiserror = "1"
+uuid = { version = "1.1.2", features = ["v4"] }
+config = { version = "0.12", features = ["toml"] }
+bitcoin_hashes = { version = "0.10", features = ["serde"] }
+secp256k1 = { version = "0.21", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] }
+serde = { version = "1.0", features = ["derive"] }
+serde_json = { version = "1.0", features = ["preserve_order"] }
+hex = "0.4"
+rusqlite = { version = "0.26", features = ["limits", "bundled"] }
+r2d2 = "0.8"
+r2d2_sqlite = "0.19"
+lazy_static = "1.4"
+governor = "0.4"
+nonzero_ext = "0.3"
+hyper = { version = "0.14", features = ["client", "server", "http1", "http2", "tcp"] }
+hyper-tls = "0.5"
+http = { version = "0.2" }
+parse_duration = "2"
+rand = "0.8"
+const_format = "0.2.28"
+regex = "1"
+
+[dev-dependencies]
+anyhow = "1"
Dockerfile

@@ -1,23 +1,28 @@
-FROM rust:1.57 as builder
+FROM docker.io/library/rust:1.66.0@sha256:359949280cebefe93ccb33089fe25111a3aadfe99eac4b6cbe8ec3e1b571dacb as builder
+RUN USER=root cargo install cargo-auditable
 RUN USER=root cargo new --bin nostr-rs-relay
 WORKDIR ./nostr-rs-relay
 COPY ./Cargo.toml ./Cargo.toml
 COPY ./Cargo.lock ./Cargo.lock
+# build dependencies only (caching)
-RUN cargo build --release
+RUN cargo auditable build --release --locked
+# get rid of starter project code
 RUN rm src/*.rs
+# copy project source code
 COPY ./src ./src
+# build auditable release using locked deps
 RUN rm ./target/release/deps/nostr*relay*
-RUN cargo build --release
+RUN cargo auditable build --release --locked

-FROM debian:buster-slim
+FROM docker.io/library/debian:bullseye-20221205-slim@sha256:25f10b4f1ded5341a3ca0a30290ff3cd5639415f0c5a2222d5e7d5dd72952aa1

 ARG APP=/usr/src/app
 ARG APP_DATA=/usr/src/app/db

 RUN apt-get update \
-    && apt-get install -y ca-certificates tzdata sqlite3 \
+    && apt-get install -y ca-certificates tzdata sqlite3 libc6 \
     && rm -rf /var/lib/apt/lists/*

 EXPOSE 8080

@@ -35,9 +40,9 @@ COPY --from=builder /nostr-rs-relay/target/release/nostr-rs-relay ${APP}/nostr-r
 RUN chown -R $APP_USER:$APP_USER ${APP}

 USER $APP_USER
-WORKDIR ${APP_DATA}
+WORKDIR ${APP}

-ENV RUST_LOG=info
+ENV RUST_LOG=info,nostr_rs_relay=info
+ENV APP_DATA=${APP_DATA}

-CMD ["../nostr-rs-relay"]
+CMD ./nostr-rs-relay --db ${APP_DATA}

README.md

@@ -1,26 +1,73 @@
 # [nostr-rs-relay](https://git.sr.ht/~gheartsfield/nostr-rs-relay)

-This is a [nostr](https://github.com/fiatjaf/nostr) relay, written in
-Rust. It currently supports the entire relay protocol, and has a
-SQLite persistence layer.
+This is a [nostr](https://github.com/nostr-protocol/nostr) relay,
+written in Rust. It currently supports the entire relay protocol, and
+persists data with SQLite.

 The project master repository is available on
 [sourcehut](https://sr.ht/~gheartsfield/nostr-rs-relay/), and is
 mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay).

+[![builds.sr.ht status](https://builds.sr.ht/~gheartsfield/nostr-rs-relay/commits/master.svg)](https://builds.sr.ht/~gheartsfield/nostr-rs-relay/commits/master?)
+
+## Features
+
+[NIPs](https://github.com/nostr-protocol/nips) with a relay-specific implementation are listed here.
+
+- [x] NIP-01: [Basic protocol flow description](https://github.com/nostr-protocol/nips/blob/master/01.md)
+  * Core event model
+  * Hide old metadata events
+  * Id/Author prefix search
+- [x] NIP-02: [Contact List and Petnames](https://github.com/nostr-protocol/nips/blob/master/02.md)
+- [ ] NIP-03: [OpenTimestamps Attestations for Events](https://github.com/nostr-protocol/nips/blob/master/03.md)
+- [x] NIP-05: [Mapping Nostr keys to DNS-based internet identifiers](https://github.com/nostr-protocol/nips/blob/master/05.md)
+- [x] NIP-09: [Event Deletion](https://github.com/nostr-protocol/nips/blob/master/09.md)
+- [x] NIP-11: [Relay Information Document](https://github.com/nostr-protocol/nips/blob/master/11.md)
+- [x] NIP-12: [Generic Tag Queries](https://github.com/nostr-protocol/nips/blob/master/12.md)
+- [x] NIP-15: [End of Stored Events Notice](https://github.com/nostr-protocol/nips/blob/master/15.md)
+- [x] NIP-16: [Event Treatment](https://github.com/nostr-protocol/nips/blob/master/16.md)
+- [x] NIP-20: [Command Results](https://github.com/nostr-protocol/nips/blob/master/20.md)
+- [x] NIP-22: [Event `created_at` limits](https://github.com/nostr-protocol/nips/blob/master/22.md) (_future-dated events only_)
+- [x] NIP-26: [Event Delegation](https://github.com/nostr-protocol/nips/blob/master/26.md)
+
 ## Quick Start

-The provided `Dockerfile` will compile and build the server application. Use a bind mount to store the SQLite database outside of the container image, and map the container's 8080 port to a host port (8090 in the example below).
+The provided `Dockerfile` will compile and build the server
+application. Use a bind mount to store the SQLite database outside of
+the container image, and map the container's 8080 port to a host port
+(7000 in the example below).
+
+The examples below start a rootless podman container, mapping a local
+data directory and config file.

 ```console
-$ docker build -t nostr-rs-relay .
-$ docker run -p 8090:8080 --mount src=$(pwd)/nostr_data,target=/usr/src/app/db,type=bind nostr-rs-relay
-[2021-12-12T04:20:47Z INFO nostr_rs_relay] Listening on: 0.0.0.0:8080
-[2021-12-12T04:20:47Z INFO nostr_rs_relay::db] Opened database for writing
-[2021-12-12T04:20:47Z INFO nostr_rs_relay::db] init completed
+$ podman build -t nostr-rs-relay .
+$ mkdir data
+$ podman unshare chown 100:100 data
+$ podman run -it --rm -p 7000:8080 \
+  --user=100:100 \
+  -v $(pwd)/data:/usr/src/app/db:Z \
+  -v $(pwd)/config.toml:/usr/src/app/config.toml:ro,Z \
+  --name nostr-relay nostr-rs-relay:latest
+Nov 19 15:31:15.013 INFO nostr_rs_relay: Starting up from main
+Nov 19 15:31:15.017 INFO nostr_rs_relay::server: listening on: 0.0.0.0:8080
+Nov 19 15:31:15.019 INFO nostr_rs_relay::server: db writer created
+Nov 19 15:31:15.019 INFO nostr_rs_relay::server: control message listener started
+Nov 19 15:31:15.019 INFO nostr_rs_relay::db: Built a connection pool "event writer" (min=1, max=4)
+Nov 19 15:31:15.019 INFO nostr_rs_relay::db: opened database "/usr/src/app/db/nostr.db" for writing
+Nov 19 15:31:15.019 INFO nostr_rs_relay::schema: DB version = 0
+Nov 19 15:31:15.054 INFO nostr_rs_relay::schema: database pragma/schema initialized to v7, and ready
+Nov 19 15:31:15.054 INFO nostr_rs_relay::schema: All migration scripts completed successfully. Welcome to v7.
+Nov 19 15:31:15.521 INFO nostr_rs_relay::db: Built a connection pool "client query" (min=4, max=128)
 ```

-Use a `nostr` client such as [`noscl`](https://github.com/fiatjaf/noscl) to publish and query events.
+Use a `nostr` client such as
+[`noscl`](https://github.com/fiatjaf/noscl) to publish and query
+events.

 ```console
 $ noscl publish "hello world"
@@ -31,6 +78,40 @@ Text Note [81cf...2652] from 296a...9b92 5 seconds ago
 hello world
 ```

+A pre-built container is also available on DockerHub:
+https://hub.docker.com/r/scsibug/nostr-rs-relay
+
+## Configuration
+
+The sample [`config.toml`](config.toml) file demonstrates the
+configuration available to the relay. This file is optional, but may
+be mounted into a docker container like so:
+
+```console
+$ docker run -it -p 7000:8080 \
+  --mount src=$(pwd)/config.toml,target=/usr/src/app/config.toml,type=bind \
+  --mount src=$(pwd)/data,target=/usr/src/app/db,type=bind \
+  nostr-rs-relay
+```
+
+Options include rate-limiting, event size limits, and network address
+settings.
+
+## Reverse Proxy Configuration
+
+For examples of putting the relay behind a reverse proxy (for TLS
+termination, load balancing, and other features), see [Reverse
+Proxy](reverse-proxy.md).
+
+## Dev Channel
+
+For development discussions, please feel free to use the [sourcehut
+mailing list](https://lists.sr.ht/~gheartsfield/nostr-rs-relay-devel).
+Or, drop by the [Nostr Telegram Channel](https://t.me/nostr_protocol).
+
+To chat about `nostr-rs-relay` on `nostr` itself, visit our channel on [anigma](https://anigma.io/) or another client that supports [NIP-28](https://github.com/nostr-protocol/nips/blob/master/28.md) chats:
+* `2ad246a094fee48c6e455dd13d759d5f41b5a233120f5719d81ebc1935075194`
+
 License
 ---
 This project is MIT licensed.

config.toml

@@ -1,32 +1,119 @@
 # Nostr-rs-relay configuration

+[info]
+# The advertised URL for the Nostr websocket.
+relay_url = "wss://nostr.example.com/"
+
+# Relay information for clients. Put your unique server name here.
+name = "nostr-rs-relay"
+
+# Description
+description = "A newly created nostr-rs-relay.\n\nCustomize this with your own info."
+
+# Administrative contact pubkey
+#pubkey = "0c2d168a4ae8ca58c9f1ab237b5df682599c6c7ab74307ea8b05684b60405d41"
+
+# Administrative contact URI
+#contact = "mailto:contact@example.com"
+
+[diagnostics]
+# Enable tokio tracing (for use with tokio-console)
+#tracing = true
+
+[database]
+# Directory for SQLite files. Defaults to the current directory. Can
+# also be specified (and overridden) with the "--db dirname" command
+# line option.
+data_directory = "."
+
+# Use an in-memory database instead of 'nostr.db'.
+# Caution: this will not survive a process restart!
+#in_memory = false
+
+# Database connection pool settings for subscribers:
+
+# Minimum number of SQLite reader connections
+#min_conn = 4
+
+# Maximum number of SQLite reader connections
+#max_conn = 128
+
 [network]
 # Bind to this network address
 address = "0.0.0.0"

 # Listen on this port
 port = 8080

+# If present, read this HTTP header for logging client IP addresses.
+# Examples for common proxies, cloudflare:
+#remote_ip_header = "x-forwarded-for"
+#remote_ip_header = "cf-connecting-ip"
+
+# Websocket ping interval in seconds, defaults to 5 minutes
+#ping_interval = 300
+
 [options]
 # Reject events that have timestamps greater than this many seconds in
-# the future. Defaults to rejecting anything greater than 30 minutes
-# from the current time.
-#reject_future_seconds = 1800
+# the future. Recommended to reject anything greater than 30 minutes
+# from the current time, but the default is to allow any date.
+reject_future_seconds = 1800

 [limits]
 # Limit events created per second, averaged over one minute. Must be
 # an integer. If not set (or set to 0), defaults to unlimited.
-messages_per_sec = 0
+#messages_per_sec = 0

-# Maximum WebSocket message in bytes. Defaults to 128k.
+# Limit the maximum size of an EVENT message. Defaults to 128 KB.
+# Set to 0 for unlimited.
+#max_event_bytes = 131072
+
+# Maximum WebSocket message in bytes. Defaults to 128 KB.
 #max_ws_message_bytes = 131072

-# Maximum WebSocket frame size in bytes. Defaults to 128k.
+# Maximum WebSocket frame size in bytes. Defaults to 128 KB.
 #max_ws_frame_bytes = 131072

 # Broadcast buffer size, in number of events. This prevents slow
-# readers from consuming memory. Defaults to 4096.
-#broadcast_buffer = 4096
+# readers from consuming memory.
+#broadcast_buffer = 16384

 # Event persistence buffer size, in number of events. This provides
-# backpressure to senders if writes are slow. Defaults to 16.
-#event_persist_buffer = 16
+# backpressure to senders if writes are slow.
+#event_persist_buffer = 4096

+[authorization]
+# Pubkey addresses in this array are whitelisted for event publishing.
+# Only valid events by these authors will be accepted, if the variable
+# is set.
+#pubkey_whitelist = [
+#  "35d26e4690cbe1a898af61cc3515661eb5fa763b57bd0b42e45099c8b32fd50f",
+#  "887645fef0ce0c3c1218d2f5d8e6132a19304cdc57cd20281d082f38cfea0072",
+#]
+
+[verified_users]
+# NIP-05 verification of users. Can be "enabled" to require NIP-05
+# metadata for event authors, "passive" to perform validation but
+# never block publishing, or "disabled" to do nothing.
+#mode = "disabled"
+
+# Domain names that will be prevented from publishing events.
+#domain_blacklist = ["wellorder.net"]
+
+# Domain names that are allowed to publish events. If defined, only
+# events from NIP-05 verified authors at these domains are persisted.
+#domain_whitelist = ["example.com"]
+
+# Consider a pubkey "verified" if we have a successful validation
+# from the NIP-05 domain within this amount of time. Note, if the
+# domain provides a successful response that omits the account,
+# verification is immediately revoked.
+#verify_expiration = "1 week"
+
+# How long to wait between verification attempts for a specific author.
+#verify_update_frequency = "24 hours"
+
+# How many consecutive failed checks before we give up on verifying
+# this author.
+#max_consecutive_failures = 20

@@ -0,0 +1,248 @@
# Author Verification Design Document
The relay will use NIP-05 DNS-based author verification to limit which
authors can publish events to a relay. This document describes how
this feature will operate.
## Considerations
DNS-based author verification is designed to be deployed in relays that
want to prevent spam, so there should be strong protections to prevent
unauthorized authors from persisting data. This includes data needed to
verify new authors.
There should be protections in place to ensure the relay cannot be
used to spam or flood other webservers. Additionally, there should be
protections against server-side request forgery (SSRF).
## Design Overview
### Concepts
All authors are initially "unverified". Unverified authors that submit
appropriate `NIP-05` metadata events become "candidates" for
verification. A candidate author becomes verified when the relay
inspects a kind `0` metadata event for the author with a `nip05` field,
and follows the procedure in `NIP-05` to successfully associate the
author with an internet identifier.
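For illustration, the lookup this procedure implies might be sketched as
follows. This is a minimal sketch assuming `reqwest` (with its `json`
feature) and `serde`; the names and types are hypothetical, not the
relay's actual code.

```
use std::collections::HashMap;

use serde::Deserialize;

/// Shape of a `.well-known/nostr.json` response (NIP-05).
#[derive(Deserialize)]
struct Nip05Response {
    /// Maps local names to hex-encoded public keys.
    names: HashMap<String, String>,
}

/// Returns Ok(true) only if `domain` explicitly lists `name` with a
/// key matching `pubkey`.
async fn verify_nip05(name: &str, domain: &str, pubkey: &str) -> Result<bool, reqwest::Error> {
    let url = format!("https://{}/.well-known/nostr.json?name={}", domain, name);
    let resp: Nip05Response = reqwest::get(&url).await?.json().await?;
    Ok(resp.names.get(name).map_or(false, |k| k == pubkey))
}
```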
The `NIP-05` procedure verifies an author for a fixed period of time,
configurable by the relay operator. If this "verification expiration
time" (`verify_expiration`) is exceeded without being refreshed, they
are once again unverified.
Verified authors have their status regularly and automatically updated
through scheduled polling of their verified domain; this process is
"re-verification". It is performed based on the configuration setting
`verify_update_frequency`, which defines how long the relay waits
between verification attempts (whether the result was success or
failure).
Authors may change their verification data (the internet identifier from
`NIP-05`) with a new metadata event, which then requires
re-verification. Their old verification remains valid until
expiration.
Performing candidate author verification is a best-effort activity and
may be significantly rate-limited to prevent relays being used to
attack other hosts. Candidate verification (untrusted authors) should
never impact re-verification (trusted authors).
## Operating Modes
The relay may operate in one of three modes. "Disabled" performs no
validation activities, and will never permit or deny events based on
an author's NIP-05 metadata. "Passive" performs NIP-05 validation,
but does not permit or deny events based on the validity or presence
of NIP-05 metadata. "Enabled" will require current and valid NIP-05
metadata for any events to be persisted. "Enabled" mode will
additionally consider domain whitelist/blacklist configuration data to
restrict which authors' events are persisted.
## Design Details
### Data Storage
Verification is stored in a dedicated table. This tracks:
* `nip05` identifier
* most recent verification timestamp
* most recent verification failure timestamp
* reference to the metadata event (used for tracking `created_at` and
`pubkey`)
### Event Handling
All events are first validated to ensure the signature is valid.
Incoming events of kind _other_ than metadata (kind `0`) submitted by
clients will be evaluated as follows.
* If the event's author has a current verification, the event is
persisted as normal.
* If the event's author has either no verification, or the
verification is expired, the event is rejected.
If the event is a metadata event, we handle it differently.
We first determine the verification status of the event's pubkey.
* If the event author is unverified, AND the event contains a `nip05`
key, we consider this a verification candidate.
* If the event author is unverified, AND the event does not contain a
`nip05` key, this is not a candidate, and the event is dropped.
* If the event author is verified, AND the event contains a `nip05`
key that is identical to the currently stored value, no special
action is needed.
* If the event author is verified, AND the event contains a different
`nip05` than was previously verified, with a more recent timestamp,
we need to re-verify.
* If the event author is verified, AND the event is missing a `nip05`
key, and the event timestamp is more recent than what was verified,
we do nothing. The current verification will be allowed to expire.
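These rules could be condensed into a single decision function, sketched
below with hypothetical types; this is not the relay's implementation.

```
/// Possible outcomes when inspecting an incoming kind 0 event.
enum MetadataAction {
    StartCandidateVerification, // unverified author, `nip05` present
    Drop,                       // unverified author, no `nip05`
    ReVerify,                   // verified author, newer event, new `nip05`
    NoOp,                       // everything else: let verification stand or expire
}

/// `verified` carries the currently verified `nip05` value and the
/// `created_at` of the event that established it, if any.
fn classify_metadata(
    verified: Option<(&str, u64)>,
    event_nip05: Option<&str>,
    event_created_at: u64,
) -> MetadataAction {
    match (verified, event_nip05) {
        (None, Some(_)) => MetadataAction::StartCandidateVerification,
        (None, None) => MetadataAction::Drop,
        (Some((current, _)), Some(new)) if current == new => MetadataAction::NoOp,
        (Some((_, ts)), Some(_)) if event_created_at > ts => MetadataAction::ReVerify,
        // Older events, or newer events missing a `nip05` key: do
        // nothing, and allow the current verification to expire.
        _ => MetadataAction::NoOp,
    }
}
```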
### Candidate Verification
When a candidate verification is requested, a rate limit will be
utilized. If the rate limit is exceeded, new candidate verification
requests will be dropped. In practice, this is implemented by a
size-limited channel that drops events that exceed a threshold.
Candidates are never persisted in the database.
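A minimal sketch of this drop-on-overflow behavior, assuming a bounded
`tokio` channel; the capacity shown is illustrative.

```
use tokio::sync::mpsc;

/// Submit a candidate for verification; if the verifier is busy and
/// the channel is full, the candidate is silently dropped.
fn submit_candidate(tx: &mpsc::Sender<String>, candidate: String) {
    if tx.try_send(candidate).is_err() {
        // Channel full or closed: drop the request. This bounds the
        // number of outbound verification attempts.
    }
}

fn main() {
    // The capacity acts as the rate-limiting threshold.
    let (tx, _rx) = mpsc::channel::<String>(16);
    submit_candidate(&tx, "alice@example.com".to_owned());
}
```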
### Re-Verification
Re-verification is straightforward when there has been no change to
the `nip05` key. A new request to the `nip05` domain is performed,
and if successful, the verification timestamp is updated to the
current time. If the request fails due to a timeout or server error,
the failure timestamp is updated instead.
When the `nip05` key has changed and this event is more recent, we
will create a new verification record, and delete all other records
for the same name.
Regarding creating new records vs. updating: We never update the event
reference or `nip05` identifier in a verification record. Every update
either resets the last-failure or the last-success timestamp.
### Determining Verification Status
In determining if an event is from a verified author, the following
procedure should be used:
Join the verification table with the event table, to provide
verification data alongside the event `created_at` and `pubkey`
metadata. Find the most recent verification record for the author,
based on the `created_at` time.
Reject the record if the success timestamp is not within our
configured expiration time.
Reject records with disallowed domains, based on any whitelists or
blacklists in effect.
If a result remains, the author is treated as verified.
This does give a time window for authors transitioning their verified
status between domains. There may be a period of time in which there
are multiple valid rows in the verification table for a given author.
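One way such a lookup might be written with `rusqlite` is sketched
below; the table and column names are assumptions based on the
data-storage notes above, not the actual schema.

```
use rusqlite::{params, Connection, OptionalExtension};

/// Return the most recent successful verification time for an author,
/// or None if no qualifying record exists.
fn latest_verification(
    conn: &Connection,
    pubkey_hex: &str,
    oldest_acceptable: i64, // now, minus the configured expiration
) -> rusqlite::Result<Option<i64>> {
    conn.query_row(
        "SELECT v.verified_at FROM user_verification v \
         INNER JOIN event e ON e.id = v.metadata_event \
         WHERE e.author = ?1 AND v.verified_at >= ?2 \
         ORDER BY e.created_at DESC LIMIT 1",
        params![hex::decode(pubkey_hex).ok(), oldest_acceptable],
        |row| row.get(0),
    )
    .optional()
}
```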
### Cleaning Up Inactive Verifications
After an author's verification has expired, we will continue to check for
it to become valid again. After a configurable number of attempts, we
should simply forget it, and reclaim the space.
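A sketch of that cleanup with `rusqlite`; the `failure_count` column and
table name are assumed for illustration.

```
/// Forget authors whose verification has failed too many consecutive
/// checks, reclaiming the space their records occupy.
fn prune_failed_verifications(
    conn: &rusqlite::Connection,
    max_failures: u32,
) -> rusqlite::Result<usize> {
    conn.execute(
        "DELETE FROM user_verification WHERE failure_count >= ?1",
        rusqlite::params![max_failures],
    )
}
```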
### Addition of Domain Whitelist/Blacklist
A set of whitelisted or blacklisted domains may be provided. If both
are provided, only the whitelist is used. In this context, domains
are either "allowed" (present on a whitelist and NOT present on a
blacklist), or "denied" (NOT present on a whitelist and present on a
blacklist).
The processes outlined so far are modified in the presence of these
options:
* Only authors with allowed domains can become candidates for
verification.
* Verification status queries additionally filter out any denied
domains.
* Re-verification processes only proceed with allowed domains.
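The allowed/denied decision might be expressed as the following sketch,
with the whitelist taking precedence when both lists are present.

```
/// Decide whether a domain may be used for verification. When both
/// lists are configured, only the whitelist is consulted.
fn domain_allowed(
    domain: &str,
    whitelist: &Option<Vec<String>>,
    blacklist: &Option<Vec<String>>,
) -> bool {
    match (whitelist, blacklist) {
        (Some(allow), _) => allow.iter().any(|d| d == domain),
        (None, Some(deny)) => !deny.iter().any(|d| d == domain),
        (None, None) => true,
    }
}
```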
### Integration
We have an existing database writer thread, which receives events and
attempts to persist them to disk. Once validated and persisted, these
events are broadcast to all subscribers.
When verification is enabled, the writer must check to ensure a valid,
unexpired verification record exists for the author. All metadata
events (regardless of verification status) are forwarded to a verifier
module. If the verifier determines a new verification record is
needed, it is also responsible for persisting and broadcasting the
event, just as the database writer would have done.
## Threat Scenarios
Some of these mitigations are fully implemented; others are documented
simply to demonstrate that a mitigation is possible.
### Domain Spamming
*Threat*: An author with a high volume of events creates a metadata event
with a bogus domain, causing the relay to generate significant
unwanted traffic to a target.
*Mitigation*: Rate limiting for all candidate verification will limit
external requests to a reasonable amount. Currently, this is a simple
delay that slows down the HTTP task.
### Denial of Service for Legitimate Authors
*Threat*: An author with a high volume of events creates a metadata event
with a domain that is invalid for them, _but which is used by other
legitimate authors_. This triggers rate-limiting against the legitimate
domain, and blocks authors from updating their own metadata.
*Mitigation*: Rate limiting should only apply to candidates, so any
existing verified authors have priority for re-verification. New
authors will be affected, as we cannot distinguish between the threat
and a legitimate author. _(Unimplemented)_
### Denial of Service by Consuming Storage
*Threat*: An author creates a high volume of random metadata events with
unique domains, in order to cause us to store large amounts of data
for to-be-verified authors.
*Mitigation*: No data is stored for candidate authors. This makes it
harder for new authors to become verified, but is effective at
preventing this attack.
### Metadata Replay for Verified Author
*Threat*: An attacker replays an out-of-date metadata event for an author, to
cause a verification to fail.
*Mitigation*: New metadata events have their signed timestamp compared
against the signed timestamp of the event that has most recently
verified them. If the metadata event is older, it is discarded.
### Server-Side Request Forgery via Metadata
*Threat*: Attacker includes malicious data in the `nip05` event, which
is used to generate HTTP requests against potentially internal
resources. Either leaking data, or invoking webservices beyond their
own privileges.
*Mitigation*: Consider detecting and dropping when the `nip05` field
is an IP address. Allow the relay operator to utilize the `blacklist`
or `whitelist` to constrain hosts that will be contacted. Most
importantly, the verification process is hardcoded to only make
requests to a known URL path
(`.well-known/nostr.json?name=<LOCAL_NAME>`). The `<LOCAL_NAME>`
component is restricted to a basic ASCII subset (preventing additional
URL components).
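
A sketch of these last two mitigations, assuming the permitted character
set shown (the relay's exact subset may differ):

```
use std::net::IpAddr;

/// Restrict local names to a conservative ASCII subset so a crafted
/// `nip05` field cannot smuggle extra URL components.
fn valid_local_name(name: &str) -> bool {
    !name.is_empty()
        && name
            .chars()
            .all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_' || c == '.')
}

/// Build the fixed well-known URL, refusing raw IP addresses.
fn verification_url(name: &str, domain: &str) -> Option<String> {
    if !valid_local_name(name) || domain.parse::<IpAddr>().is_ok() {
        return None;
    }
    Some(format!(
        "https://{}/.well-known/nostr.json?name={}",
        domain, name
    ))
}
```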


@@ -0,0 +1,3 @@
#!/usr/bin/env bash
sed -E 's/@sha256:[[:alnum:]]+//g' Dockerfile > Dockerfile.any-platform
echo "Created platform-agnostic Dockerfile in 'Dockerfile.any-platform'"

reverse-proxy.md

@@ -0,0 +1,92 @@
# Reverse Proxy Setup Guide
It is recommended to run `nostr-rs-relay` behind a reverse proxy such
as `haproxy` or `nginx` to provide TLS termination. Simple examples
of `haproxy` and `nginx` configurations are documented here.
## Minimal HAProxy Configuration
Assumptions:
* HAProxy version is `2.4.10` or greater (older versions not tested).
* Hostname for the relay is `relay.example.com`.
* Your relay should be available over wss://relay.example.com
* Your (NIP-11) relay info page should be available on https://relay.example.com
* SSL certificate is located in `/etc/certs/example.com.pem`.
* Relay is running on port 8080.
* Limit connections to 400 concurrent.
* HSTS (HTTP Strict Transport Security) is desired.
* Only TLS 1.2 or greater is allowed.
```
global
ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets
frontend fe_prod
mode http
bind :443 ssl crt /etc/certs/example.com.pem alpn h2,http/1.1
bind :80
http-request set-header X-Forwarded-Proto https if { ssl_fc }
redirect scheme https code 301 if !{ ssl_fc }
acl host_relay hdr(host) -i relay.example.com
use_backend relay if host_relay
# HSTS (1 year)
http-response set-header Strict-Transport-Security max-age=31536000
backend relay
mode http
timeout connect 5s
timeout client 50s
timeout server 50s
timeout tunnel 1h
timeout client-fin 30s
option tcp-check
default-server maxconn 400 check inter 20s fastinter 1s
server relay 127.0.0.1:8080
```
### HAProxy Notes
You may experience WebSocket connection problems with Firefox if
HTTP/2 is enabled on older versions of HAProxy (2.3.x). Either
disable HTTP/2 (`h2`), or upgrade HAProxy.
## Bare-bones Nginx Configuration
Assumptions:
* `Nginx` version is `1.18.0` (other versions not tested).
* Hostname for the relay is `relay.example.com`.
* SSL certificate and key are located at `/etc/letsencrypt/live/relay.example.com/`.
* Relay is running on port `8080`.
```
http {
server {
listen 443 ssl;
server_name relay.example.com;
ssl_certificate /etc/letsencrypt/live/relay.example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/relay.example.com/privkey.pem;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers HIGH:!aNULL:!MD5;
keepalive_timeout 70;
location / {
proxy_pass http://localhost:8080;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Host $host;
}
}
}
```
### Nginx Notes
The above configuration was tested with `nginx` `1.18.0` on `Ubuntu 20.04`.
For help installing `nginx` on `Ubuntu`, see [this guide](https://www.digitalocean.com/community/tutorials/how-to-install-nginx-on-ubuntu-20-04).
For guidance on using `letsencrypt` to obtain a cert on `Ubuntu`, including an `nginx` plugin, see [this post](https://www.digitalocean.com/community/tutorials/how-to-secure-nginx-with-let-s-encrypt-on-ubuntu-20-04).


@@ -1 +1 @@
edition = "2018" edition = "2021"


@@ -1,9 +1,11 @@
//! Subscription close request parsing //! Subscription close request parsing
//!
//! Representation and parsing of `CLOSE` messages sent from clients.
use crate::error::{Error, Result}; use crate::error::{Error, Result};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
/// Close command in network format /// Close command in network format
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct CloseCmd { pub struct CloseCmd {
/// Protocol command, expected to always be "CLOSE". /// Protocol command, expected to always be "CLOSE".
cmd: String, cmd: String,
@@ -11,8 +13,8 @@ pub struct CloseCmd {
id: String, id: String,
} }
/// Close command parsed /// Identifier of the subscription to be closed.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct Close { pub struct Close {
/// The subscription identifier being closed. /// The subscription identifier being closed.
pub id: String, pub id: String,
@@ -21,10 +23,10 @@ pub struct Close {
impl From<CloseCmd> for Result<Close> { impl From<CloseCmd> for Result<Close> {
fn from(cc: CloseCmd) -> Result<Close> { fn from(cc: CloseCmd) -> Result<Close> {
// ensure command is correct // ensure command is correct
if cc.cmd != "CLOSE" { if cc.cmd == "CLOSE" {
Err(Error::CommandUnknownError)
} else {
Ok(Close { id: cc.id }) Ok(Close { id: cc.id })
} else {
Err(Error::CommandUnknownError)
} }
} }
} }


@@ -1,28 +1,44 @@
use lazy_static::lazy_static; //! Configuration file and settings management
use log::*; use config::{Config, ConfigError, File};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::sync::RwLock; use std::time::Duration;
use tracing::warn;
// initialize a singleton default configuration #[derive(Debug, Serialize, Deserialize, Clone)]
lazy_static! { #[allow(unused)]
pub static ref SETTINGS: RwLock<Settings> = RwLock::new(Settings::default()); pub struct Info {
pub relay_url: Option<String>,
pub name: Option<String>,
pub description: Option<String>,
pub pubkey: Option<String>,
pub contact: Option<String>,
} }
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Database {
pub data_directory: String,
pub in_memory: bool,
pub min_conn: u32,
pub max_conn: u32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)] #[allow(unused)]
pub struct Network { pub struct Network {
pub port: u16, pub port: u16,
pub address: String, pub address: String,
pub remote_ip_header: Option<String>, // retrieve client IP from this HTTP header if present
pub ping_interval_seconds: u32,
} }
// #[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Serialize, Deserialize)]
#[allow(unused)] #[allow(unused)]
pub struct Options { pub struct Options {
pub reject_future_seconds: Option<usize>, // if defined, reject any events with a timestamp more than X seconds in the future pub reject_future_seconds: Option<usize>, // if defined, reject any events with a timestamp more than X seconds in the future
} }
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)] #[allow(unused)]
pub struct Retention { pub struct Retention {
// TODO: implement // TODO: implement
@@ -32,49 +48,143 @@ pub struct Retention {
pub whitelist_addresses: Option<Vec<String>>, // whitelisted addresses (never delete) pub whitelist_addresses: Option<Vec<String>>, // whitelisted addresses (never delete)
} }
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)] #[allow(unused)]
pub struct Limits { pub struct Limits {
pub messages_per_sec: Option<u32>, // Artificially slow down event writing to limit disk consumption (averaged over 1 minute) pub messages_per_sec: Option<u32>, // Artificially slow down event writing to limit disk consumption (averaged over 1 minute)
pub max_event_bytes: Option<usize>, pub max_event_bytes: Option<usize>, // Maximum size of an EVENT message
pub max_ws_message_bytes: Option<usize>, pub max_ws_message_bytes: Option<usize>,
pub max_ws_frame_bytes: Option<usize>, pub max_ws_frame_bytes: Option<usize>,
pub broadcast_buffer: usize, // events to buffer for subscribers (prevents slow readers from consuming memory) pub broadcast_buffer: usize, // events to buffer for subscribers (prevents slow readers from consuming memory)
pub event_persist_buffer: usize, // events to buffer for database commits (block senders if database writes are too slow) pub event_persist_buffer: usize, // events to buffer for database commits (block senders if database writes are too slow)
} }
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Authorization {
pub pubkey_whitelist: Option<Vec<String>>, // If present, only allow these pubkeys to publish events
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Diagnostics {
pub tracing: bool, // enables tokio console-subscriber
}
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Copy)]
#[serde(rename_all = "lowercase")]
pub enum VerifiedUsersMode {
Enabled,
Passive,
Disabled,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct VerifiedUsers {
pub mode: VerifiedUsersMode, // Mode of operation: "enabled" (enforce) or "passive" (check only). If none, this is simply disabled.
pub domain_whitelist: Option<Vec<String>>, // If present, only allow verified users from these domains can publish events
pub domain_blacklist: Option<Vec<String>>, // If present, allow all verified users from any domain except these
pub verify_expiration: Option<String>, // how long a verification is cached for before no longer being used
pub verify_update_frequency: Option<String>, // how often to attempt to update verification
pub verify_expiration_duration: Option<Duration>, // internal result of parsing verify_expiration
pub verify_update_frequency_duration: Option<Duration>, // internal result of parsing verify_update_frequency
pub max_consecutive_failures: usize, // maximum number of verification failures in a row, before ceasing future checks
}
impl VerifiedUsers {
pub fn init(&mut self) {
self.verify_expiration_duration = self.verify_expiration_duration();
self.verify_update_frequency_duration = self.verify_update_duration();
}
#[must_use]
pub fn is_enabled(&self) -> bool {
self.mode == VerifiedUsersMode::Enabled
}
#[must_use]
pub fn is_active(&self) -> bool {
self.mode == VerifiedUsersMode::Enabled || self.mode == VerifiedUsersMode::Passive
}
#[must_use]
pub fn is_passive(&self) -> bool {
self.mode == VerifiedUsersMode::Passive
}
#[must_use]
pub fn verify_expiration_duration(&self) -> Option<Duration> {
self.verify_expiration
.as_ref()
.and_then(|x| parse_duration::parse(x).ok())
}
#[must_use]
pub fn verify_update_duration(&self) -> Option<Duration> {
self.verify_update_frequency
.as_ref()
.and_then(|x| parse_duration::parse(x).ok())
}
#[must_use]
pub fn is_valid(&self) -> bool {
self.verify_expiration_duration().is_some() && self.verify_update_duration().is_some()
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)] #[allow(unused)]
pub struct Settings { pub struct Settings {
pub info: Info,
pub diagnostics: Diagnostics,
pub database: Database,
pub network: Network, pub network: Network,
pub limits: Limits, pub limits: Limits,
pub authorization: Authorization,
pub verified_users: VerifiedUsers,
pub retention: Retention, pub retention: Retention,
pub options: Options, pub options: Options,
} }
impl Settings { impl Settings {
#[must_use]
pub fn new() -> Self { pub fn new() -> Self {
let d = Self::default(); let default_settings = Self::default();
// attempt to construct settings with file // attempt to construct settings with file
// Self::new_from_default(&d).unwrap_or(d) let from_file = Self::new_from_default(&default_settings);
let from_file = Self::new_from_default(&d);
match from_file { match from_file {
Ok(f) => f, Ok(f) => f,
Err(e) => { Err(e) => {
warn!("Error reading config file ({:?})", e); warn!("Error reading config file ({:?})", e);
d default_settings
} }
} }
} }
fn new_from_default(default: &Settings) -> Result<Self, config::ConfigError> { fn new_from_default(default: &Settings) -> Result<Self, ConfigError> {
let config: config::Config = config::Config::new(); let builder = Config::builder();
let settings: Settings = config let config: Config = builder
// use defaults // use defaults
.with_merged(config::Config::try_from(default).unwrap())? .add_source(Config::try_from(default)?)
// override with file contents // override with file contents
.with_merged(config::File::with_name("config"))? .add_source(File::with_name("config.toml"))
.try_into()?; .build()?;
let mut settings: Settings = config.try_deserialize()?;
// ensure connection pool size is logical
assert!(
settings.database.min_conn <= settings.database.max_conn,
"Database min_conn setting ({}) cannot exceed max_conn ({})",
settings.database.min_conn,
settings.database.max_conn
);
// ensure durations parse
assert!(
settings.verified_users.is_valid(),
"VerifiedUsers time settings could not be parsed"
);
// initialize durations for verified users
settings.verified_users.init();
Ok(settings) Ok(settings)
} }
} }
@@ -82,17 +192,46 @@ impl Settings {
impl Default for Settings { impl Default for Settings {
fn default() -> Self { fn default() -> Self {
Settings { Settings {
info: Info {
relay_url: None,
name: Some("Unnamed nostr-rs-relay".to_owned()),
description: None,
pubkey: None,
contact: None,
},
diagnostics: Diagnostics { tracing: false },
database: Database {
data_directory: ".".to_owned(),
in_memory: false,
min_conn: 4,
max_conn: 128,
},
network: Network { network: Network {
port: 8080, port: 8080,
ping_interval_seconds: 300,
address: "0.0.0.0".to_owned(), address: "0.0.0.0".to_owned(),
remote_ip_header: None,
}, },
limits: Limits { limits: Limits {
messages_per_sec: None, messages_per_sec: None,
max_event_bytes: Some(2 << 17), // 128K max_event_bytes: Some(2 << 17), // 128K
max_ws_message_bytes: Some(2 << 17), // 128K max_ws_message_bytes: Some(2 << 17), // 128K
max_ws_frame_bytes: Some(2 << 17), // 128K max_ws_frame_bytes: Some(2 << 17), // 128K
broadcast_buffer: 4096, broadcast_buffer: 16384,
event_persist_buffer: 16, event_persist_buffer: 4096,
},
authorization: Authorization {
pubkey_whitelist: None, // Allow any address to publish
},
verified_users: VerifiedUsers {
mode: VerifiedUsersMode::Disabled,
domain_whitelist: None,
domain_blacklist: None,
verify_expiration: Some("1 week".to_owned()),
verify_update_frequency: Some("1 day".to_owned()),
verify_expiration_duration: None,
verify_update_frequency_duration: None,
max_consecutive_failures: 20,
}, },
retention: Retention { retention: Retention {
max_events: None, // max events max_events: None, // max events
@@ -101,7 +240,7 @@ impl Default for Settings {
whitelist_addresses: None, // whitelisted addresses (never delete) whitelist_addresses: None, // whitelisted addresses (never delete)
}, },
options: Options { options: Options {
reject_future_seconds: Some(30 * 60), // Reject events 30min in the future or greater reject_future_seconds: None, // Reject events in the future if defined
}, },
} }
} }


@@ -2,11 +2,10 @@
use crate::close::Close; use crate::close::Close;
use crate::error::Error; use crate::error::Error;
use crate::error::Result; use crate::error::Result;
use crate::event::Event;
use crate::subscription::Subscription; use crate::subscription::Subscription;
use log::*;
use std::collections::HashMap; use std::collections::HashMap;
use tracing::{debug, info};
use uuid::Uuid; use uuid::Uuid;
/// A subscription identifier has a maximum length /// A subscription identifier has a maximum length
@@ -14,6 +13,8 @@ const MAX_SUBSCRIPTION_ID_LEN: usize = 256;
/// State for a client connection /// State for a client connection
pub struct ClientConn { pub struct ClientConn {
/// Client IP (either from the socket, or a configured proxy header)
client_ip: String,
/// Unique client identifier generated at connection time /// Unique client identifier generated at connection time
client_id: Uuid, client_id: Uuid,
/// The current set of active client subscriptions /// The current set of active client subscriptions
@@ -24,39 +25,44 @@ pub struct ClientConn {
impl Default for ClientConn { impl Default for ClientConn {
fn default() -> Self { fn default() -> Self {
Self::new() Self::new("unknown".to_owned())
} }
} }
impl ClientConn { impl ClientConn {
/// Create a new, empty connection state. /// Create a new, empty connection state.
pub fn new() -> Self { #[must_use]
pub fn new(client_ip: String) -> Self {
let client_id = Uuid::new_v4(); let client_id = Uuid::new_v4();
ClientConn { ClientConn {
client_ip,
client_id, client_id,
subscriptions: HashMap::new(), subscriptions: HashMap::new(),
max_subs: 32, max_subs: 32,
} }
} }
pub fn subscriptions(&self) -> &HashMap<String, Subscription> {
&self.subscriptions
}
/// Get a short prefix of the client's unique identifier, suitable /// Get a short prefix of the client's unique identifier, suitable
/// for logging. /// for logging.
#[must_use]
pub fn get_client_prefix(&self) -> String { pub fn get_client_prefix(&self) -> String {
self.client_id.to_string().chars().take(8).collect() self.client_id.to_string().chars().take(8).collect()
} }
/// Find all matching subscriptions. #[must_use]
pub fn get_matching_subscriptions(&self, e: &Event) -> Vec<&str> { pub fn ip(&self) -> &str {
let mut v: Vec<&str> = vec![]; &self.client_ip
for (id, sub) in self.subscriptions.iter() {
if sub.interested_in_event(e) {
v.push(id);
}
}
v
} }
/// Add a new subscription for this connection. /// Add a new subscription for this connection.
/// # Errors
///
/// Will return `Err` if the client has too many subscriptions, or
/// if the provided name is excessively long.
pub fn subscribe(&mut self, s: Subscription) -> Result<()> { pub fn subscribe(&mut self, s: Subscription) -> Result<()> {
let k = s.get_id(); let k = s.get_id();
let sub_id_len = k.len(); let sub_id_len = k.len();
@@ -72,8 +78,12 @@ impl ClientConn {
// check if an existing subscription exists, and replace if so // check if an existing subscription exists, and replace if so
if self.subscriptions.contains_key(&k) { if self.subscriptions.contains_key(&k) {
self.subscriptions.remove(&k); self.subscriptions.remove(&k);
self.subscriptions.insert(k, s); self.subscriptions.insert(k, s.clone());
debug!("replaced existing subscription"); debug!(
"replaced existing subscription (cid: {}, sub: {:?})",
self.get_client_prefix(),
s.get_id()
);
return Ok(()); return Ok(());
} }
@@ -84,19 +94,21 @@ impl ClientConn {
// add subscription // add subscription
self.subscriptions.insert(k, s); self.subscriptions.insert(k, s);
debug!( debug!(
"registered new subscription, currently have {} active subs", "registered new subscription, currently have {} active subs (cid: {})",
self.subscriptions.len() self.subscriptions.len(),
self.get_client_prefix(),
); );
Ok(()) Ok(())
} }
/// Remove the subscription for this connection. /// Remove the subscription for this connection.
pub fn unsubscribe(&mut self, c: Close) { pub fn unsubscribe(&mut self, c: &Close) {
// TODO: return notice if subscription did not exist. // TODO: return notice if subscription did not exist.
self.subscriptions.remove(&c.id); self.subscriptions.remove(&c.id);
debug!( debug!(
"removed subscription, currently have {} active subs", "removed subscription, currently have {} active subs (cid: {})",
self.subscriptions.len() self.subscriptions.len(),
self.get_client_prefix(),
); );
} }
} }

src/db.rs

@@ -1,136 +1,128 @@
//! Event persistence and querying //! Event persistence and querying
use crate::error::Result; //use crate::config::SETTINGS;
use crate::event::Event; use crate::config::Settings;
use crate::error::{Error, Result};
use crate::event::{single_char_tagname, Event};
use crate::hexrange::hex_range;
use crate::hexrange::HexSearch;
use crate::nip05;
use crate::notice::Notice;
use crate::schema::{upgrade_db, STARTUP_SQL};
use crate::subscription::ReqFilter;
use crate::subscription::Subscription; use crate::subscription::Subscription;
use crate::utils::{is_hex, is_lower_hex};
use governor::clock::Clock; use governor::clock::Clock;
use governor::{Quota, RateLimiter}; use governor::{Quota, RateLimiter};
use hex; use hex;
use log::*; use r2d2;
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::params; use rusqlite::params;
use rusqlite::Connection; use rusqlite::types::ToSql;
use rusqlite::OpenFlags; use rusqlite::OpenFlags;
//use std::num::NonZeroU32; use std::fmt::Write as _;
use crate::config::SETTINGS;
use std::path::Path; use std::path::Path;
use std::thread; use std::thread;
use std::time::Duration;
use std::time::Instant;
use tokio::task; use tokio::task;
use tracing::{debug, info, trace, warn};
pub type SqlitePool = r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>;
pub type PooledConnection = r2d2::PooledConnection<r2d2_sqlite::SqliteConnectionManager>;
/// Events submitted from a client, with a return channel for notices
pub struct SubmittedEvent {
pub event: Event,
pub notice_tx: tokio::sync::mpsc::Sender<Notice>,
}
/// Database file /// Database file
const DB_FILE: &str = "nostr.db"; pub const DB_FILE: &str = "nostr.db";
/// Startup DB Pragmas /// Build a database connection pool.
const STARTUP_SQL: &str = r##" /// # Panics
PRAGMA main.synchronous=NORMAL; ///
PRAGMA foreign_keys = ON; /// Will panic if the pool could not be created.
pragma mmap_size = 536870912; -- 512MB of mmap #[must_use]
"##; pub fn build_pool(
name: &str,
/// Schema definition settings: &Settings,
const INIT_SQL: &str = r##" flags: OpenFlags,
-- Database settings min_size: u32,
PRAGMA encoding = "UTF-8"; max_size: u32,
PRAGMA journal_mode=WAL; wait_for_db: bool,
PRAGMA main.synchronous=NORMAL; ) -> SqlitePool {
PRAGMA foreign_keys = ON; let db_dir = &settings.database.data_directory;
PRAGMA application_id = 1654008667; let full_path = Path::new(db_dir).join(DB_FILE);
PRAGMA user_version = 2; // small hack; if the database doesn't exist yet, that means the
// writer thread hasn't finished. Give it a chance to work. This
-- Event Table // is only an issue with the first time we run.
CREATE TABLE IF NOT EXISTS event ( if !settings.database.in_memory {
id INTEGER PRIMARY KEY, while !full_path.exists() && wait_for_db {
event_hash BLOB NOT NULL, -- 4-byte hash debug!("Database reader pool is waiting on the database to be created...");
first_seen INTEGER NOT NULL, -- when the event was first seen (not authored!) (seconds since 1970) thread::sleep(Duration::from_millis(500));
created_at INTEGER NOT NULL, -- when the event was authored
author BLOB NOT NULL, -- author pubkey
kind INTEGER NOT NULL, -- event kind
hidden INTEGER, -- relevant for queries
content TEXT NOT NULL -- serialized json of event object
);
-- Event Indexes
CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash);
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS author_index ON event(author);
CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
-- Event References Table
CREATE TABLE IF NOT EXISTS event_ref (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains an #e tag.
referenced_event BLOB NOT NULL, -- the event that is referenced.
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
-- Event References Index
CREATE INDEX IF NOT EXISTS event_ref_index ON event_ref(referenced_event);
-- Pubkey References Table
CREATE TABLE IF NOT EXISTS pubkey_ref (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains an #p tag.
referenced_pubkey BLOB NOT NULL, -- the pubkey that is referenced.
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE RESTRICT ON DELETE CASCADE
);
-- Pubkey References Index
CREATE INDEX IF NOT EXISTS pubkey_ref_index ON pubkey_ref(referenced_pubkey);
"##;
/// Upgrade DB to latest version, and execute pragma settings
pub fn upgrade_db(conn: &mut Connection) -> Result<()> {
// check the version.
let curr_version = db_version(conn)?;
info!("DB version = {:?}", curr_version);
// initialize from scratch
if curr_version == 0 {
match conn.execute_batch(INIT_SQL) {
Ok(()) => info!("database pragma/schema initialized to v2, and ready"),
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be initialized");
}
} }
} else if curr_version == 1 {
// only change is adding a hidden column to events.
let upgrade_sql = r##"
ALTER TABLE event ADD hidden INTEGER;
UPDATE event SET hidden=FALSE;
PRAGMA user_version = 2;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => info!("database schema upgraded v1 -> v2"),
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
} else if curr_version == 2 {
debug!("Database version was already current");
} else if curr_version > 2 {
panic!("Database version is newer than supported by this executable");
} }
// Setup PRAGMA let manager = if settings.database.in_memory {
conn.execute_batch(STARTUP_SQL)?; SqliteConnectionManager::memory()
Ok(()) .with_flags(flags)
.with_init(|c| c.execute_batch(STARTUP_SQL))
} else {
SqliteConnectionManager::file(&full_path)
.with_flags(flags)
.with_init(|c| c.execute_batch(STARTUP_SQL))
};
let pool: SqlitePool = r2d2::Pool::builder()
.test_on_check_out(true) // no noticeable performance hit
.min_idle(Some(min_size))
.max_size(max_size)
.build(manager)
.unwrap();
info!(
"Built a connection pool {:?} (min={}, max={})",
name, min_size, max_size
);
pool
} }
/// Spawn a database writer that persists events to the SQLite store. /// Spawn a database writer that persists events to the SQLite store.
pub async fn db_writer( pub async fn db_writer(
mut event_rx: tokio::sync::mpsc::Receiver<Event>, settings: Settings,
mut event_rx: tokio::sync::mpsc::Receiver<SubmittedEvent>,
bcast_tx: tokio::sync::broadcast::Sender<Event>, bcast_tx: tokio::sync::broadcast::Sender<Event>,
metadata_tx: tokio::sync::broadcast::Sender<Event>,
mut shutdown: tokio::sync::broadcast::Receiver<()>, mut shutdown: tokio::sync::broadcast::Receiver<()>,
) -> tokio::task::JoinHandle<Result<()>> { ) -> tokio::task::JoinHandle<Result<()>> {
// are we performing NIP-05 checking?
let nip05_active = settings.verified_users.is_active();
// are we requiring NIP-05 user verification?
let nip05_enabled = settings.verified_users.is_enabled();
task::spawn_blocking(move || { task::spawn_blocking(move || {
let mut conn = Connection::open_with_flags( let db_dir = &settings.database.data_directory;
Path::new(DB_FILE), let full_path = Path::new(db_dir).join(DB_FILE);
// create a connection pool
let pool = build_pool(
"event writer",
&settings,
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE, OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
)?; 1,
info!("opened database for writing"); 4,
upgrade_db(&mut conn)?; false,
);
if settings.database.in_memory {
info!("using in-memory database, this will not persist a restart!");
} else {
info!("opened database {:?} for writing", full_path);
}
upgrade_db(&mut pool.get()?)?;
// Make a copy of the whitelist
let whitelist = &settings.authorization.pubkey_whitelist.clone();
// get rate limit settings // get rate limit settings
let config = SETTINGS.read().unwrap(); let rps_setting = settings.limits.messages_per_sec;
let rps_setting = config.limits.messages_per_sec; let mut most_recent_rate_limit = Instant::now();
let mut lim_opt = None; let mut lim_opt = None;
let clock = governor::clock::QuantaClock::default(); let clock = governor::clock::QuantaClock::default();
if let Some(rps) = rps_setting { if let Some(rps) = rps_setting {
@@ -141,7 +133,7 @@ pub async fn db_writer(
} }
} }
loop { loop {
if let Ok(_) = shutdown.try_recv() { if shutdown.try_recv().is_ok() {
info!("shutting down database writer"); info!("shutting down database writer");
break; break;
} }
@@ -151,114 +143,264 @@ pub async fn db_writer(
if next_event.is_none() { if next_event.is_none() {
break; break;
} }
// track if an event write occurred; this is used to
// update the rate limiter
let mut event_write = false; let mut event_write = false;
let event = next_event.unwrap(); let subm_event = next_event.unwrap();
match write_event(&mut conn, &event) { let event = subm_event.event;
Ok(updated) => { let notice_tx = subm_event.notice_tx;
if updated == 0 { // check if this event is authorized.
debug!("ignoring duplicate event"); if let Some(allowed_addrs) = whitelist {
} else { // TODO: incorporate delegated pubkeys
info!("persisted event: {}", event.get_event_id_prefix()); // if the event address is not in allowed_addrs.
event_write = true; if !allowed_addrs.contains(&event.pubkey) {
// send this out to all clients info!(
bcast_tx.send(event.clone()).ok(); "Rejecting event {}, unauthorized author",
} event.get_event_id_prefix()
} );
Err(err) => { notice_tx
warn!("event insert failed: {}", err); .try_send(Notice::blocked(
event.id,
"pubkey is not allowed to publish to this relay",
))
.ok();
continue;
} }
} }
// send any metadata events to the NIP-05 verifier
if nip05_active && event.is_kind_metadata() {
// we are sending this prior to even deciding if we
// persist it. this allows the nip05 module to
// inspect it, update if necessary, or persist a new
// event and broadcast it itself.
metadata_tx.send(event.clone()).ok();
}
// check for NIP-05 verification
if nip05_enabled {
match nip05::query_latest_user_verification(pool.get()?, event.pubkey.to_owned()) {
Ok(uv) => {
if uv.is_valid(&settings.verified_users) {
info!(
"new event from verified author ({:?},{:?})",
uv.name.to_string(),
event.get_author_prefix()
);
} else {
info!("rejecting event, author ({:?} / {:?}) verification invalid (expired/wrong domain)",
uv.name.to_string(),
event.get_author_prefix()
);
notice_tx
.try_send(Notice::blocked(
event.id,
"NIP-05 verification is no longer valid (expired/wrong domain)",
))
.ok();
continue;
}
}
Err(Error::SqlError(rusqlite::Error::QueryReturnedNoRows)) => {
debug!(
"no verification records found for pubkey: {:?}",
event.get_author_prefix()
);
notice_tx
.try_send(Notice::blocked(
event.id,
"NIP-05 verification needed to publish events",
))
.ok();
continue;
}
Err(e) => {
warn!("checking nip05 verification status failed: {:?}", e);
continue;
}
}
}
// TODO: cache recent list of authors to remove a DB call.
let start = Instant::now();
if event.kind >= 20000 && event.kind < 30000 {
bcast_tx.send(event.clone()).ok();
info!(
"published ephemeral event: {:?} from: {:?} in: {:?}",
event.get_event_id_prefix(),
event.get_author_prefix(),
start.elapsed()
);
event_write = true
} else {
match write_event(&mut pool.get()?, &event) {
Ok(updated) => {
if updated == 0 {
trace!("ignoring duplicate or deleted event");
notice_tx.try_send(Notice::duplicate(event.id)).ok();
} else {
info!(
"persisted event: {:?} from: {:?} in: {:?}",
event.get_event_id_prefix(),
event.get_author_prefix(),
start.elapsed()
);
event_write = true;
// send this out to all clients
bcast_tx.send(event.clone()).ok();
notice_tx.try_send(Notice::saved(event.id)).ok();
}
}
Err(err) => {
warn!("event insert failed: {:?}", err);
let msg = "relay experienced an error trying to publish the latest event";
notice_tx.try_send(Notice::error(event.id, msg)).ok();
}
}
}
// use rate limit, if defined, and if an event was actually written. // use rate limit, if defined, and if an event was actually written.
if event_write { if event_write {
if let Some(ref lim) = lim_opt { if let Some(ref lim) = lim_opt {
if let Err(n) = lim.check() { if let Err(n) = lim.check() {
info!("Rate limiting event creation"); let wait_for = n.wait_time_from(clock.now());
thread::sleep(n.wait_time_from(clock.now())); // check if we have recently logged rate
// limits, but print out a message only once
// per second.
if most_recent_rate_limit.elapsed().as_secs() > 10 {
warn!(
"rate limit reached for event creation (sleep for {:?}) (suppressing future messages for 10 seconds)",
wait_for
);
// reset last rate limit message
most_recent_rate_limit = Instant::now();
}
// block event writes, allowing them to queue up
thread::sleep(wait_for);
continue; continue;
} }
} }
} }
} }
conn.close().ok();
info!("database connection closed"); info!("database connection closed");
Ok(()) Ok(())
}) })
} }
pub fn db_version(conn: &mut Connection) -> Result<usize> { /// Persist an event to the database, returning rows added.
let query = "PRAGMA user_version;"; pub fn write_event(conn: &mut PooledConnection, e: &Event) -> Result<usize> {
let curr_version = conn.query_row(query, [], |row| row.get(0))?;
Ok(curr_version)
}
/// Persist an event to the database.
pub fn write_event(conn: &mut Connection, e: &Event) -> Result<usize> {
// start transaction // start transaction
let tx = conn.transaction()?; let tx = conn.transaction()?;
// get relevant fields from event and convert to blobs. // get relevant fields from event and convert to blobs.
let id_blob = hex::decode(&e.id).ok(); let id_blob = hex::decode(&e.id).ok();
let pubkey_blob = hex::decode(&e.pubkey).ok(); let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok();
let delegator_blob: Option<Vec<u8>> = e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
let event_str = serde_json::to_string(&e).ok(); let event_str = serde_json::to_string(&e).ok();
// ignore if the event hash is a duplicate. // ignore if the event hash is a duplicate.
let ins_count = tx.execute( let mut ins_count = tx.execute(
"INSERT OR IGNORE INTO event (event_hash, created_at, kind, author, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, strftime('%s','now'), FALSE);", "INSERT OR IGNORE INTO event (event_hash, created_at, kind, author, delegated_by, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, ?6, strftime('%s','now'), FALSE);",
params![id_blob, e.created_at, e.kind, pubkey_blob, event_str] params![id_blob, e.created_at, e.kind, pubkey_blob, delegator_blob, event_str]
)?; )?;
if ins_count == 0 { if ins_count == 0 {
// if the event was a duplicate, no need to insert event or // if the event was a duplicate, no need to insert event or
// pubkey references. // pubkey references. This will abort the txn.
return Ok(ins_count); return Ok(ins_count);
} }
// remember primary key of the event most recently inserted. // remember primary key of the event most recently inserted.
let ev_id = tx.last_insert_rowid(); let ev_id = tx.last_insert_rowid();
// add all event tags into the event_ref table // add all tags to the tag table
let etags = e.get_event_tags(); for tag in e.tags.iter() {
if !etags.is_empty() { // ensure we have 2 values.
for etag in etags.iter() { if tag.len() >= 2 {
tx.execute( let tagname = &tag[0];
"INSERT OR IGNORE INTO event_ref (event_id, referenced_event) VALUES (?1, ?2)", let tagval = &tag[1];
params![ev_id, hex::decode(&etag).ok()], // only single-char tags are searchable
)?; let tagchar_opt = single_char_tagname(tagname);
match &tagchar_opt {
Some(_) => {
// if tagvalue is lowercase hex;
if is_lower_hex(tagval) && (tagval.len() % 2 == 0) {
tx.execute(
"INSERT OR IGNORE INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3)",
params![ev_id, &tagname, hex::decode(tagval).ok()],
)?;
} else {
tx.execute(
"INSERT OR IGNORE INTO tag (event_id, name, value) VALUES (?1, ?2, ?3)",
params![ev_id, &tagname, &tagval],
)?;
}
}
None => {}
}
} }
} }
// add all event tags into the pubkey_ref table // if this event is replaceable update, hide every other replaceable
let ptags = e.get_pubkey_tags(); // event with the same kind from the same author that was issued
if !ptags.is_empty() { // earlier than this.
for ptag in ptags.iter() { if e.kind == 0 || e.kind == 3 || (e.kind >= 10000 && e.kind < 20000) {
tx.execute(
"INSERT OR IGNORE INTO pubkey_ref (event_id, referenced_pubkey) VALUES (?1, ?2)",
params![ev_id, hex::decode(&ptag).ok()],
)?;
}
}
// if this event is for a metadata update, hide every other kind=0
// event from the same author that was issued earlier than this.
if e.kind == 0 {
let update_count = tx.execute( let update_count = tx.execute(
"UPDATE event SET hidden=TRUE WHERE id!=? AND kind=0 AND author=? AND created_at <= ? and hidden!=TRUE", "UPDATE event SET hidden=TRUE WHERE id!=? AND kind=? AND author=? AND created_at <= ? and hidden!=TRUE",
params![ev_id, hex::decode(&e.pubkey).ok(), e.created_at], params![ev_id, e.kind, hex::decode(&e.pubkey).ok(), e.created_at],
)?; )?;
if update_count > 0 { if update_count > 0 {
info!("hid {} older metadata events", update_count); info!(
"hid {} older replaceable kind {} events for author: {:?}",
update_count,
e.kind,
e.get_author_prefix()
);
} }
} }
// if this event is for a contact update, hide every other kind=3 // if this event is a deletion, hide the referenced events from the same author.
// event from the same author that was issued earlier than this. if e.kind == 5 {
if e.kind == 3 { let event_candidates = e.tag_values_by_name("e");
let update_count = tx.execute( // first parameter will be author
"UPDATE event SET hidden=TRUE WHERE id!=? AND kind=3 AND author=? AND created_at <= ? and hidden!=TRUE", let mut params: Vec<Box<dyn ToSql>> = vec![Box::new(hex::decode(&e.pubkey)?)];
params![ev_id, hex::decode(&e.pubkey).ok(), e.created_at], event_candidates
)?; .iter()
if update_count > 0 { .filter(|x| is_hex(x) && x.len() == 64)
info!("hid {} older contact events", update_count); .filter_map(|x| hex::decode(x).ok())
.for_each(|x| params.push(Box::new(x)));
let query = format!(
"UPDATE event SET hidden=TRUE WHERE kind!=5 AND author=? AND event_hash IN ({})",
repeat_vars(params.len() - 1)
);
let mut stmt = tx.prepare(&query)?;
let update_count = stmt.execute(rusqlite::params_from_iter(params))?;
info!(
"hid {} deleted events for author {:?}",
update_count,
e.get_author_prefix()
);
} else {
// check if a deletion has already been recorded for this event.
// Only relevant for non-deletion events
let del_count = tx.query_row(
"SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.author=? AND t.name='e' AND e.kind=5 AND t.value_hex=? LIMIT 1;",
params![pubkey_blob, id_blob], |row| row.get::<usize, usize>(0));
// check if the query returned a result, meaning we should
// hide the current event
if del_count.ok().is_some() {
// a deletion already existed, mark original event as hidden.
info!(
"hid event: {:?} due to existing deletion by author: {:?}",
e.get_event_id_prefix(),
e.get_author_prefix()
);
let _update_count =
tx.execute("UPDATE event SET hidden=TRUE WHERE id=?", params![ev_id])?;
// event was deleted, so let caller know nothing new
// arrived, preventing this from being sent to active
// subscriptions
ins_count = 0;
} }
} }
tx.commit()?; tx.commit()?;
Ok(ins_count) Ok(ins_count)
} }
/// Event resulting from a specific subscription request /// Serialized event associated with a specific subscription request.
#[derive(PartialEq, Debug, Clone)] #[derive(PartialEq, Eq, Debug, Clone)]
pub struct QueryResult { pub struct QueryResult {
/// Subscription identifier /// Subscription identifier
pub sub_id: String, pub sub_id: String,
@@ -266,99 +408,202 @@ pub struct QueryResult {
pub event: String, pub event: String,
} }
/// Check if a string contains only hex characters. /// Produce an arbitrary list of '?' parameters.
fn is_hex(s: &str) -> bool { fn repeat_vars(count: usize) -> String {
s.chars().all(|x| char::is_ascii_hexdigit(&x)) if count == 0 {
return "".to_owned();
}
let mut s = "?,".repeat(count);
// Remove trailing comma
s.pop();
s
} }
/// Create a dynamic SQL query string from a subscription. /// Create a dynamic SQL subquery and params from a subscription filter.
fn query_from_sub(sub: &Subscription) -> String { fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>) {
// build a dynamic SQL query. all user-input is either an integer // build a dynamic SQL query. all user-input is either an integer
// (sqli-safe), or a string that is filtered to only contain // (sqli-safe), or a string that is filtered to only contain
// hexadecimal characters. // hexadecimal characters. Strings that require escaping (tag
let mut query = // names/values) use parameters.
"SELECT DISTINCT(e.content) FROM event e LEFT JOIN event_ref er ON e.id=er.event_id LEFT JOIN pubkey_ref pr ON e.id=pr.event_id "
.to_owned(); // if the filter is malformed, don't return anything.
// for every filter in the subscription, generate a where clause if f.force_no_match {
let mut filter_clauses: Vec<String> = Vec::new(); let empty_query =
for f in sub.filters.iter() { "SELECT DISTINCT(e.content), e.created_at FROM event e WHERE 1=0".to_owned();
// individual filter components // query parameters for SQLite
let mut filter_components: Vec<String> = Vec::new(); let empty_params: Vec<Box<dyn ToSql>> = vec![];
// Query for "authors" return (empty_query, empty_params);
if f.authors.is_some() { }
let authors_escaped: Vec<String> = f
.authors let mut query = "SELECT DISTINCT(e.content), e.created_at FROM event e".to_owned();
.as_ref() // query parameters for SQLite
.unwrap() let mut params: Vec<Box<dyn ToSql>> = vec![];
.iter()
.filter(|&x| is_hex(x)) // individual filter components (single conditions such as an author or event ID)
.map(|x| format!("x'{}'", x)) let mut filter_components: Vec<String> = Vec::new();
.collect(); // Query for "authors", allowing prefix matches
let authors_clause = format!("author IN ({})", authors_escaped.join(", ")); if let Some(authvec) = &f.authors {
// take each author and convert to a hexsearch
let mut auth_searches: Vec<String> = vec![];
for auth in authvec {
match hex_range(auth) {
Some(HexSearch::Exact(ex)) => {
auth_searches.push("author=? OR delegated_by=?".to_owned());
params.push(Box::new(ex.clone()));
params.push(Box::new(ex));
}
Some(HexSearch::Range(lower, upper)) => {
auth_searches.push(
"(author>? AND author<?) OR (delegated_by>? AND delegated_by<?)".to_owned(),
);
params.push(Box::new(lower.clone()));
params.push(Box::new(upper.clone()));
params.push(Box::new(lower));
params.push(Box::new(upper));
}
Some(HexSearch::LowerOnly(lower)) => {
auth_searches.push("author>? OR delegated_by>?".to_owned());
params.push(Box::new(lower.clone()));
params.push(Box::new(lower));
}
None => {
info!("Could not parse hex range from author {:?}", auth);
}
}
}
if !authvec.is_empty() {
let authors_clause = format!("({})", auth_searches.join(" OR "));
filter_components.push(authors_clause); filter_components.push(authors_clause);
}
// Query for Kind
if f.kind.is_some() {
// kind is number, no escaping needed
let kind_clause = format!("kind = {}", f.kind.unwrap());
filter_components.push(kind_clause);
}
// Query for event
if f.id.is_some() {
let id_str = f.id.as_ref().unwrap();
if is_hex(id_str) {
let id_clause = format!("event_hash = x'{}'", id_str);
filter_components.push(id_clause);
}
}
// Query for referenced event
if f.event.is_some() {
let ev_str = f.event.as_ref().unwrap();
if is_hex(ev_str) {
let ev_clause = format!("referenced_event = x'{}'", ev_str);
filter_components.push(ev_clause);
}
}
// Query for referenced pet name pubkey
if f.pubkey.is_some() {
let pet_str = f.pubkey.as_ref().unwrap();
if is_hex(pet_str) {
let pet_clause = format!("referenced_pubkey = x'{}'", pet_str);
filter_components.push(pet_clause);
}
}
// Query for timestamp
if f.since.is_some() {
let created_clause = format!("created_at > {}", f.since.unwrap());
filter_components.push(created_clause);
}
// Query for timestamp
if f.until.is_some() {
let until_clause = format!("created_at < {}", f.until.unwrap());
filter_components.push(until_clause);
}
// combine all clauses, and add to filter_clauses
if !filter_components.is_empty() {
let mut fc = "( ".to_owned();
fc.push_str(&filter_components.join(" AND "));
fc.push_str(" )");
filter_clauses.push(fc);
} else { } else {
// never display hidden events // if the authors list was empty, we should never return
filter_clauses.push("hidden!=FALSE".to_owned()); // any results.
filter_components.push("false".to_owned());
} }
} }
// Query for Kind
// combine all filters with OR clauses, if any exist if let Some(ks) = &f.kinds {
if !filter_clauses.is_empty() { // kind is number, no escaping needed
query.push_str(" WHERE "); let str_kinds: Vec<String> = ks.iter().map(|x| x.to_string()).collect();
query.push_str(&filter_clauses.join(" OR ")); let kind_clause = format!("kind IN ({})", str_kinds.join(", "));
filter_components.push(kind_clause);
} }
// add order clause // Query for event, allowing prefix matches
query.push_str(" ORDER BY created_at ASC"); if let Some(idvec) = &f.ids {
debug!("query string: {}", query); // take each author and convert to a hexsearch
query let mut id_searches: Vec<String> = vec![];
for id in idvec {
match hex_range(id) {
Some(HexSearch::Exact(ex)) => {
id_searches.push("event_hash=?".to_owned());
params.push(Box::new(ex));
}
Some(HexSearch::Range(lower, upper)) => {
id_searches.push("(event_hash>? AND event_hash<?)".to_owned());
params.push(Box::new(lower));
params.push(Box::new(upper));
}
Some(HexSearch::LowerOnly(lower)) => {
id_searches.push("event_hash>?".to_owned());
params.push(Box::new(lower));
}
None => {
info!("Could not parse hex range from id {:?}", id);
}
}
}
if !idvec.is_empty() {
let id_clause = format!("({})", id_searches.join(" OR "));
filter_components.push(id_clause);
} else {
// if the ids list was empty, we should never return
// any results.
filter_components.push("false".to_owned());
}
}
// Query for tags
if let Some(map) = &f.tags {
for (key, val) in map.iter() {
let mut str_vals: Vec<Box<dyn ToSql>> = vec![];
let mut blob_vals: Vec<Box<dyn ToSql>> = vec![];
for v in val {
if (v.len() % 2 == 0) && is_lower_hex(v) {
if let Ok(h) = hex::decode(v) {
blob_vals.push(Box::new(h));
}
} else {
str_vals.push(Box::new(v.to_owned()));
}
}
// create clauses with "?" params for each tag value being searched
let str_clause = format!("value IN ({})", repeat_vars(str_vals.len()));
let blob_clause = format!("value_hex IN ({})", repeat_vars(blob_vals.len()));
// find evidence of the target tag name/value existing for this event.
let tag_clause = format!("e.id IN (SELECT e.id FROM event e LEFT JOIN tag t on e.id=t.event_id WHERE hidden!=TRUE and (name=? AND ({} OR {})))", str_clause, blob_clause);
// add the tag name as the first parameter
params.push(Box::new(key.to_string()));
// add all tag values that are plain strings as params
params.append(&mut str_vals);
// add all tag values that are blobs as params
params.append(&mut blob_vals);
filter_components.push(tag_clause);
}
}
// Query for timestamp
if f.since.is_some() {
let created_clause = format!("created_at > {}", f.since.unwrap());
filter_components.push(created_clause);
}
// Query for timestamp
if f.until.is_some() {
let until_clause = format!("created_at < {}", f.until.unwrap());
filter_components.push(until_clause);
}
// never display hidden events
query.push_str(" WHERE hidden!=TRUE");
// build filter component conditions
if !filter_components.is_empty() {
query.push_str(" AND ");
query.push_str(&filter_components.join(" AND "));
}
// Apply per-filter limit to this subquery.
// The use of a LIMIT implies a DESC order, to capture only the most recent events.
if let Some(lim) = f.limit {
let _ = write!(query, " ORDER BY e.created_at DESC LIMIT {}", lim);
} else {
query.push_str(" ORDER BY e.created_at ASC")
}
(query, params)
}
/// Create a dynamic SQL query string and params from a subscription.
fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
// build a dynamic SQL query for an entire subscription, based on
// SQL subqueries for filters.
let mut subqueries: Vec<String> = Vec::new();
// subquery params
let mut params: Vec<Box<dyn ToSql>> = vec![];
// for every filter in the subscription, generate a subquery
for f in sub.filters.iter() {
let (f_subquery, mut f_params) = query_from_filter(f);
subqueries.push(f_subquery);
params.append(&mut f_params);
}
// encapsulate subqueries into select statements
let subqueries_selects: Vec<String> = subqueries
.iter()
.map(|s| format!("SELECT content, created_at FROM ({})", s))
.collect();
let query: String = subqueries_selects.join(" UNION ");
(query, params)
}
fn log_pool_stats(pool: &SqlitePool) {
let state: r2d2::State = pool.state();
let in_use_cxns = state.connections - state.idle_connections;
debug!(
"DB pool usage (in_use: {}, total: {})",
in_use_cxns, state.connections
);
}
@@ -369,35 +614,87 @@ fn query_from_sub(sub: &Subscription) -> String {
/// Perform a database query using a subscription.
/// query is immediately aborted.
pub async fn db_query(
sub: Subscription,
client_id: String,
pool: SqlitePool,
query_tx: tokio::sync::mpsc::Sender<QueryResult>,
mut abandon_query_rx: tokio::sync::oneshot::Receiver<()>,
) {
task::spawn_blocking(move || {
let mut row_count: usize = 0;
let start = Instant::now();
// generate SQL query
let (q, p) = query_from_sub(&sub);
trace!("SQL generated in {:?}", start.elapsed());
// show pool stats
log_pool_stats(&pool);
// cutoff for displaying slow queries
let slow_cutoff = Duration::from_millis(1000);
let start = Instant::now();
if let Ok(conn) = pool.get() {
// execute the query. Don't cache, since queries vary so much.
let mut stmt = conn.prepare(&q)?;
let mut event_rows = stmt.query(rusqlite::params_from_iter(p))?;
let mut first_result = true;
while let Some(row) = event_rows.next()? {
if first_result {
let first_result_elapsed = start.elapsed();
// logging for slow queries; show sub and SQL
if first_result_elapsed >= slow_cutoff {
info!(
"going to query for: {:?} (cid: {}, sub: {:?})",
sub, client_id, sub.id
);
info!(
"final query string (slow): {} (cid: {}, sub: {:?})",
q, client_id, sub.id
);
} else {
trace!(
"going to query for: {:?} (cid: {}, sub: {:?})",
sub,
client_id,
sub.id
);
trace!("final query string: {}", q);
}
debug!(
"first result in {:?} (cid: {}, sub: {:?})",
first_result_elapsed, client_id, sub.id
);
first_result = false;
}
// check if this is still active
// TODO: check every N rows
if abandon_query_rx.try_recv().is_ok() {
debug!("query aborted (cid: {}, sub: {:?})", client_id, sub.id);
return Ok(());
}
row_count += 1;
let event_json = row.get(0)?;
query_tx
.blocking_send(QueryResult {
sub_id: sub.get_id(),
event: event_json,
})
.ok();
}
query_tx
.blocking_send(QueryResult {
sub_id: sub.get_id(),
event: "EOSE".to_string(),
})
.ok();
debug!(
"query completed in {:?} (cid: {}, sub: {:?}, rows: {})",
start.elapsed(),
client_id,
sub.id,
row_count
);
} else {
warn!("Could not get a database connection for querying");
}
let ok: Result<()> = Ok(());
ok
});
}
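// Note: the final QueryResult above carries the literal string "EOSE"
// instead of event JSON, signalling the NIP-15 end-of-stored-events
// marker for this subscription once all matching rows have been sent.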

src/delegation.rs (new file, 416 lines)

@@ -0,0 +1,416 @@
//! Event parsing and validation
use crate::error::Error;
use crate::error::Result;
use crate::event::Event;
use bitcoin_hashes::{sha256, Hash};
use lazy_static::lazy_static;
use regex::Regex;
use secp256k1::{schnorr, Secp256k1, VerifyOnly, XOnlyPublicKey};
use serde::{Deserialize, Serialize};
use std::str::FromStr;
use tracing::{debug, info};
// This handles everything related to delegation, in particular the
// condition/rune parsing and logic.
// Conditions are poorly specified, so we will implement the minimum
// necessary for now.
// fields MUST be either "kind" or "created_at".
// operators supported are ">", "<", "=", "!".
// no operations on 'content' are supported.
// this allows constraints for:
// valid date ranges (valid from X->Y dates).
// specific kinds (publish kind=1,5)
// kind ranges (publish ephemeral events, kind>19999&kind<30001)
// for more complex scenarios (allow delegatee to publish ephemeral
// AND replacement events), it may be necessary to generate and use
// different condition strings, since we do not support grouping or
// "OR" logic.
lazy_static! {
/// Secp256k1 verification instance.
pub static ref SECP: Secp256k1<VerifyOnly> = Secp256k1::verification_only();
}
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub enum Field {
Kind,
CreatedAt,
}
impl FromStr for Field {
type Err = Error;
fn from_str(value: &str) -> Result<Self, Self::Err> {
if value == "kind" {
Ok(Field::Kind)
} else if value == "created_at" {
Ok(Field::CreatedAt)
} else {
Err(Error::DelegationParseError)
}
}
}
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub enum Operator {
LessThan,
GreaterThan,
Equals,
NotEquals,
}
impl FromStr for Operator {
type Err = Error;
fn from_str(value: &str) -> Result<Self, Self::Err> {
if value == "<" {
Ok(Operator::LessThan)
} else if value == ">" {
Ok(Operator::GreaterThan)
} else if value == "=" {
Ok(Operator::Equals)
} else if value == "!" {
Ok(Operator::NotEquals)
} else {
Err(Error::DelegationParseError)
}
}
}
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct ConditionQuery {
pub(crate) conditions: Vec<Condition>,
}
impl ConditionQuery {
pub fn allows_event(&self, event: &Event) -> bool {
// check each condition, to ensure that the event complies
// with the restriction.
for c in &self.conditions {
if !c.allows_event(event) {
// any failing condition invalidates the delegation
// on this event
return false;
}
}
// delegation was permitted unconditionally, or all conditions
// were true
true
}
}
// Verify that the delegator approved the delegation; return a ConditionQuery if so.
pub fn validate_delegation(
delegator: &str,
delegatee: &str,
cond_query: &str,
sigstr: &str,
) -> Option<ConditionQuery> {
// form the token
let tok = format!("nostr:delegation:{}:{}", delegatee, cond_query);
// form SHA256 hash
let digest: sha256::Hash = sha256::Hash::hash(tok.as_bytes());
let sig = schnorr::Signature::from_str(sigstr).ok()?;
if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) {
if let Ok(pubkey) = XOnlyPublicKey::from_str(delegator) {
let verify = SECP.verify_schnorr(&sig, &msg, &pubkey);
if verify.is_ok() {
// return the parsed condition query
cond_query.parse::<ConditionQuery>().ok()
} else {
debug!("client sent an delegation signature that did not validate");
None
}
} else {
debug!("client sent malformed delegation pubkey");
None
}
} else {
info!("error converting delegation digest to secp256k1 message");
None
}
}
/// Parsed delegation condition
/// see https://github.com/nostr-protocol/nips/pull/28#pullrequestreview-1084903800
/// An example complex condition would be: kind=1,2,3&created_at<1665265999
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct Condition {
pub(crate) field: Field,
pub(crate) operator: Operator,
pub(crate) values: Vec<u64>,
}
impl Condition {
/// Check if this condition allows the given event to be delegated
pub fn allows_event(&self, event: &Event) -> bool {
// determine what the right-hand side of the operator is
let resolved_field = match &self.field {
Field::Kind => event.kind,
Field::CreatedAt => event.created_at,
};
match &self.operator {
Operator::LessThan => {
// the less-than operator is only valid for single values.
if self.values.len() == 1 {
if let Some(v) = self.values.first() {
return resolved_field < *v;
}
}
}
Operator::GreaterThan => {
// the greater-than operator is only valid for single values.
if self.values.len() == 1 {
if let Some(v) = self.values.first() {
return resolved_field > *v;
}
}
}
Operator::Equals => {
// equals is interpreted as "must be equal to at least one provided value"
return self.values.iter().any(|&x| resolved_field == x);
}
Operator::NotEquals => {
// not-equals is interpreted as "must not be equal to any provided value"
// this is the one case where an empty list of values is allowed, even though it is a pointless restriction.
return self.values.iter().all(|&x| resolved_field != x);
}
}
false
}
}
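// Worked examples of the logic above (hypothetical conditions):
// "kind=1,5" allows events of kind 1 or 5; "kind!3,4" allows any kind
// except 3 and 4; "created_at<1675000000" requires exactly one value,
// so a list such as "created_at<1,2" matches nothing.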
fn str_to_condition(cs: &str) -> Option<Condition> {
// a condition is a string (alphanum+underscore), an operator (<>=!), and values (num+comma)
lazy_static! {
static ref RE: Regex = Regex::new("([[:word:]]+)([<>=!]+)([,[[:digit:]]]*)").unwrap();
}
// match against the regex
let caps = RE.captures(cs)?;
let field = caps.get(1)?.as_str().parse::<Field>().ok()?;
let operator = caps.get(2)?.as_str().parse::<Operator>().ok()?;
// values are just comma separated numbers, but all must be parsed
let rawvals = caps.get(3)?.as_str();
let values = rawvals
.split_terminator(',')
.map(|n| n.parse::<u64>().ok())
.collect::<Option<Vec<_>>>()?;
// convert field string into Field
Some(Condition {
field,
operator,
values,
})
}
/// Parse a condition query from a string slice
impl FromStr for ConditionQuery {
type Err = Error;
fn from_str(value: &str) -> Result<Self, Self::Err> {
// split the string with '&'
let mut conditions = vec![];
let condstrs = value.split_terminator('&');
// parse each individual condition
for c in condstrs {
conditions.push(str_to_condition(c).ok_or(Error::DelegationParseError)?);
}
Ok(ConditionQuery { conditions })
}
}
#[cfg(test)]
mod tests {
use super::*;
// parse condition strings
#[test]
fn parse_empty() -> Result<()> {
// given an empty condition query, produce an empty vector
let empty_cq = ConditionQuery { conditions: vec![] };
let parsed = "".parse::<ConditionQuery>()?;
assert_eq!(parsed, empty_cq);
Ok(())
}
// parse field 'kind'
#[test]
fn test_kind_field_parse() -> Result<()> {
let field = "kind".parse::<Field>()?;
assert_eq!(field, Field::Kind);
Ok(())
}
// parse field 'created_at'
#[test]
fn test_created_at_field_parse() -> Result<()> {
let field = "created_at".parse::<Field>()?;
assert_eq!(field, Field::CreatedAt);
Ok(())
}
// parse unknown field
#[test]
fn unknown_field_parse() {
let field = "unk".parse::<Field>();
assert!(field.is_err());
}
// parse a full conditional query with an empty array
#[test]
fn parse_kind_equals_empty() -> Result<()> {
// a kind query with an empty value list parses to a single condition with no values
let kind_cq = ConditionQuery {
conditions: vec![Condition {
field: Field::Kind,
operator: Operator::Equals,
values: vec![],
}],
};
let parsed = "kind=".parse::<ConditionQuery>()?;
assert_eq!(parsed, kind_cq);
Ok(())
}
// parse a full conditional query with a single value
#[test]
fn parse_kind_equals_singleval() -> Result<()> {
// a kind query with a single value parses to one condition with that value
let kind_cq = ConditionQuery {
conditions: vec![Condition {
field: Field::Kind,
operator: Operator::Equals,
values: vec![1],
}],
};
let parsed = "kind=1".parse::<ConditionQuery>()?;
assert_eq!(parsed, kind_cq);
Ok(())
}
// parse a full conditional query with multiple values
#[test]
fn parse_kind_equals_multival() -> Result<()> {
// a kind query with multiple values parses them all into one condition
let kind_cq = ConditionQuery {
conditions: vec![Condition {
field: Field::Kind,
operator: Operator::Equals,
values: vec![1, 2, 4],
}],
};
let parsed = "kind=1,2,4".parse::<ConditionQuery>()?;
assert_eq!(parsed, kind_cq);
Ok(())
}
// parse multiple conditions
#[test]
fn parse_multi_conditions() -> Result<()> {
// multiple '&'-joined conditions parse into one Condition each
let cq = ConditionQuery {
conditions: vec![
Condition {
field: Field::Kind,
operator: Operator::GreaterThan,
values: vec![10000],
},
Condition {
field: Field::Kind,
operator: Operator::LessThan,
values: vec![20000],
},
Condition {
field: Field::Kind,
operator: Operator::NotEquals,
values: vec![10001],
},
Condition {
field: Field::CreatedAt,
operator: Operator::LessThan,
values: vec![1665867123],
},
],
};
let parsed =
"kind>10000&kind<20000&kind!10001&created_at<1665867123".parse::<ConditionQuery>()?;
assert_eq!(parsed, cq);
Ok(())
}
fn simple_event() -> Event {
Event {
id: "0".to_owned(),
pubkey: "0".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: vec![],
content: "".to_owned(),
sig: "0".to_owned(),
tagidx: None,
}
}
// Check for condition logic on event w/ empty values
#[test]
fn condition_with_empty_values() {
let mut c = Condition {
field: Field::Kind,
operator: Operator::GreaterThan,
values: vec![],
};
let e = simple_event();
assert!(!c.allows_event(&e));
c.operator = Operator::LessThan;
assert!(!c.allows_event(&e));
c.operator = Operator::Equals;
assert!(!c.allows_event(&e));
// Not Equals applied to an empty list *is* allowed
// (pointless, but logically valid).
c.operator = Operator::NotEquals;
assert!(c.allows_event(&e));
}
// Check for condition logic on event w/ single value
#[test]
fn condition_kind_gt_event_single() {
let c = Condition {
field: Field::Kind,
operator: Operator::GreaterThan,
values: vec![10],
};
let mut e = simple_event();
// kind is not greater than 10, not allowed
e.kind = 1;
assert!(!c.allows_event(&e));
// kind is greater than 10, allowed
e.kind = 100;
assert!(c.allows_event(&e));
// kind is 10, not allowed
e.kind = 10;
assert!(!c.allows_event(&e));
}
// Check for condition logic on event w/ multi values
#[test]
fn condition_with_multi_values() {
let mut c = Condition {
field: Field::Kind,
operator: Operator::Equals,
values: vec![0, 10, 20],
};
let mut e = simple_event();
// Allow if event kind is in list for Equals
e.kind = 10;
assert!(c.allows_event(&e));
// Deny if event kind is not in list for Equals
e.kind = 11;
assert!(!c.allows_event(&e));
// Deny if event kind is in list for NotEquals
e.kind = 10;
c.operator = Operator::NotEquals;
assert!(!c.allows_event(&e));
// Allow if event kind is not in list for NotEquals
e.kind = 99;
c.operator = Operator::NotEquals;
assert!(c.allows_event(&e));
// Always deny if GreaterThan/LessThan for a list
c.operator = Operator::LessThan;
assert!(!c.allows_event(&e));
c.operator = Operator::GreaterThan;
assert!(!c.allows_event(&e));
}
}

src/error.rs

@@ -17,10 +17,18 @@ pub enum Error {
ConnWriteError,
#[error("EVENT parse failed")]
EventParseFailed,
#[error("CLOSE message parse failed")]
CloseParseFailed,
#[error("Event invalid signature")]
EventInvalidSignature,
#[error("Event invalid id")]
EventInvalidId,
#[error("Event malformed pubkey")]
EventMalformedPubkey,
#[error("Event could not canonicalize")]
EventCouldNotCanonicalize,
#[error("Event too large")]
EventMaxLengthError(usize),
#[error("Subscription identifier max length exceeded")]
SubIdMaxLengthError,
#[error("Maximum concurrent subscription count reached")]
@@ -36,6 +44,53 @@ pub enum Error {
SqlError(rusqlite::Error),
#[error("Config error")]
ConfigError(config::ConfigError),
#[error("Data directory does not exist")]
DatabaseDirError,
#[error("Database Connection Pool Error")]
DatabasePoolError(r2d2::Error),
#[error("Custom Error : {0}")]
CustomError(String),
#[error("Task join error")]
JoinError,
#[error("Hyper Client error")]
HyperError(hyper::Error),
#[error("Hex encoding error")]
HexError(hex::FromHexError),
#[error("Delegation parse error")]
DelegationParseError,
#[error("Unknown/Undocumented")]
UnknownError,
}
//impl From<Box<dyn std::error::Error>> for Error {
//    fn from(e: Box<dyn std::error::Error>) -> Self {
//        Error::CustomError("error".to_owned())
//    }
//}
impl From<hex::FromHexError> for Error {
fn from(h: hex::FromHexError) -> Self {
Error::HexError(h)
}
}
impl From<hyper::Error> for Error {
fn from(h: hyper::Error) -> Self {
Error::HyperError(h)
}
}
impl From<r2d2::Error> for Error {
fn from(d: r2d2::Error) -> Self {
Error::DatabasePoolError(d)
}
}
impl From<tokio::task::JoinError> for Error {
/// Wrap task join error
fn from(_j: tokio::task::JoinError) -> Self {
Error::JoinError
}
}
impl From<rusqlite::Error> for Error {

src/event.rs

@@ -1,28 +1,45 @@
//! Event parsing and validation
use crate::delegation::validate_delegation;
use crate::error::Error::*;
use crate::error::Result;
use crate::nip05;
use crate::utils::unix_time;
use bitcoin_hashes::{sha256, Hash};
use lazy_static::lazy_static;
use secp256k1::{schnorr, Secp256k1, VerifyOnly, XOnlyPublicKey};
use serde::{Deserialize, Deserializer, Serialize};
use serde_json::value::Value;
use serde_json::Number;
use std::collections::HashMap;
use std::collections::HashSet;
use std::str::FromStr;
use tracing::{debug, info};
lazy_static! {
/// Secp256k1 verification instance.
pub static ref SECP: Secp256k1<VerifyOnly> = Secp256k1::verification_only();
}
/// Event command in network format.
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct EventCmd {
cmd: String, // expecting static "EVENT"
event: Event,
}
impl EventCmd {
pub fn event_id(&self) -> &str {
&self.event.id
}
}
/// Parsed nostr event.
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct Event {
pub id: String,
pub(crate) pubkey: String,
#[serde(skip)]
pub(crate) delegated_by: Option<String>,
pub(crate) created_at: u64,
pub(crate) kind: u64,
#[serde(deserialize_with = "tag_from_string")]
@@ -30,6 +47,9 @@ pub struct Event {
pub(crate) tags: Vec<Vec<String>>,
pub(crate) content: String,
pub(crate) sig: String,
// Optimization for tag search, built on demand.
#[serde(skip)]
pub(crate) tagidx: Option<HashMap<char, HashSet<String>>>,
}
/// Simple tag type for array of array of strings.
@@ -41,7 +61,26 @@ where
D: Deserializer<'de>,
{
let opt = Option::deserialize(deserializer)?;
Ok(opt.unwrap_or_default())
}
/// Attempt to form a single-char tag name.
pub fn single_char_tagname(tagname: &str) -> Option<char> {
// We return the tag character if and only if the tagname consists
// of a single char.
let mut tagnamechars = tagname.chars();
let firstchar = tagnamechars.next();
match firstchar {
Some(_) => {
// check second char
if tagnamechars.next().is_none() {
firstchar
} else {
None
}
}
None => None,
}
}
/// Convert network event to parsed/validated event.
@@ -50,46 +89,145 @@ impl From<EventCmd> for Result<Event> {
// ensure command is correct
if ec.cmd != "EVENT" {
Err(CommandUnknownError)
} else {
ec.event.validate().map(|_| {
let mut e = ec.event;
e.build_index();
e.update_delegation();
e
})
}
}
}
impl Event {
pub fn is_kind_metadata(&self) -> bool {
self.kind == 0
}
/// Pull a NIP-05 Name out of the event, if one exists
pub fn get_nip05_addr(&self) -> Option<nip05::Nip05Name> {
if self.is_kind_metadata() {
// very quick check if we should attempt to parse this json
if self.content.contains("\"nip05\"") {
// Parse into JSON
let md_parsed: Value = serde_json::from_str(&self.content).ok()?;
let md_map = md_parsed.as_object()?;
let nip05_str = md_map.get("nip05")?.as_str()?;
return nip05::Nip05Name::try_from(nip05_str).ok();
}
}
None
}
// is this event delegated (properly)?
// does the signature match, and are conditions valid?
// if so, return an alternate author for the event
pub fn delegated_author(&self) -> Option<String> {
// is there a delegation tag?
let delegation_tag: Vec<String> = self
.tags
.iter()
.filter(|x| x.len() == 4)
.filter(|x| x.get(0).unwrap() == "delegation")
.take(1)
.next()?
.to_vec(); // get first tag
// delegation tags should have exactly 3 elements after the name (pubkey, condition, sig)
// the event is signed by the delegatee
let delegatee = &self.pubkey;
// the delegation tag references the claimed delegator
let delegator: &str = delegation_tag.get(1)?;
let querystr: &str = delegation_tag.get(2)?;
let sig: &str = delegation_tag.get(3)?;
// attempt to get a condition query; this requires the delegation to have a valid signature.
if let Some(cond_query) = validate_delegation(delegator, delegatee, querystr, sig) {
// The signature was valid, now we ensure the delegation
// condition is valid for this event:
if cond_query.allows_event(self) {
// since this is allowed, we will provide the delegatee
Some(delegator.into())
} else {
debug!("an event failed to satisfy delegation conditions");
None
}
} else {
debug!("event had an invalid delegation signature");
None
}
}
/// Update delegation status
fn update_delegation(&mut self) {
self.delegated_by = self.delegated_author();
}
/// Build an event tag index
fn build_index(&mut self) {
// if there are no tags; just leave the index as None
if self.tags.is_empty() {
return;
}
// otherwise, build an index
let mut idx: HashMap<char, HashSet<String>> = HashMap::new();
// iterate over tags that have at least 2 elements
for t in self.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.get(0).unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
}
let tagnamechar = tagnamechar_opt.unwrap();
let tagval = t.get(1).unwrap();
// ensure a set exists for this tag
idx.entry(tagnamechar).or_insert_with(HashSet::new);
// get the tag set and insert entry
let idx_tag_vec = idx.get_mut(&tagnamechar).expect("could not get tag vector");
idx_tag_vec.insert(tagval.clone());
}
// save the tag structure
self.tagidx = Some(idx);
}
/// Create a short event identifier, suitable for logging.
pub fn get_event_id_prefix(&self) -> String {
self.id.chars().take(8).collect()
}
pub fn get_author_prefix(&self) -> String {
self.pubkey.chars().take(8).collect()
}
/// Retrieve tag initial values across all tags matching the name
pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
self.tags
.iter()
.filter(|x| x.len() > 1)
.filter(|x| x.get(0).unwrap() == tag_name)
.map(|x| x.get(1).unwrap().to_owned())
.collect()
}
pub fn is_valid_timestamp(&self, reject_future_seconds: Option<usize>) -> bool {
if let Some(allowable_future) = reject_future_seconds {
let curr_time = unix_time();
// calculate difference, plus how far future we allow
if curr_time + (allowable_future as u64) < self.created_at {
let delta = self.created_at - curr_time;
debug!(
"event is too far in the future ({} seconds), rejecting",
delta
);
return false;
}
}
true
}
/// Check if this event has a valid signature.
fn validate(&self) -> Result<()> {
// validation is performed by:
// * parsing JSON string into event fields
// * create an array:
@@ -97,8 +235,8 @@ impl Event {
// * serialize with no spaces/newlines
let c_opt = self.to_canonical();
if c_opt.is_none() {
debug!("could not canonicalize");
return Err(EventCouldNotCanonicalize);
}
let c = c_opt.unwrap();
// * compute the sha256sum.
@@ -106,15 +244,23 @@ impl Event {
let hex_digest = format!("{:x}", digest);
// * ensure the id matches the computed sha256sum.
if self.id != hex_digest {
debug!("event id does not match digest");
return Err(EventInvalidId);
}
// * validate the message digest (sig) using the pubkey & computed sha256 message hash.
let sig = schnorr::Signature::from_str(&self.sig).map_err(|_| EventInvalidSignature)?;
if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) {
if let Ok(pubkey) = XOnlyPublicKey::from_str(&self.pubkey) {
SECP.verify_schnorr(&sig, &msg, &pubkey)
.map_err(|_| EventInvalidSignature)
} else {
debug!("client sent malformed pubkey");
Err(EventMalformedPubkey)
}
} else {
info!("error converting digest to secp256k1 message");
Err(EventInvalidSignature)
}
}
/// Convert event to canonical representation for signing.
@@ -154,36 +300,19 @@ impl Event {
serde_json::Value::Array(tags)
}
/// Determine if the given tag and value set intersect with tags in this event.
pub fn generic_tag_val_intersect(&self, tagname: char, check: &HashSet<String>) -> bool {
match &self.tagidx {
// check if this is indexable tagname
Some(idx) => match idx.get(&tagname) {
Some(valset) => {
let common = valset.intersection(check);
common.count() > 0
}
None => false,
},
None => false,
}
}
}
@@ -194,11 +323,13 @@ mod tests {
Event {
id: "0".to_owned(),
pubkey: "0".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: vec![],
content: "".to_owned(),
sig: "0".to_owned(),
tagidx: None,
}
}
@@ -219,18 +350,24 @@ mod tests {
}
#[test]
fn empty_event_tag_match() {
let event = simple_event();
assert!(!event
.generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
}
#[test]
fn single_event_tag_match() {
let mut event = simple_event();
event.tags = vec![vec!["e".to_owned(), "foo".to_owned()]];
event.build_index();
assert!(event.generic_tag_val_intersect(
'e',
&HashSet::from(["foo".to_owned(), "bar".to_owned()])
));
}
#[test]
@@ -268,22 +405,79 @@ mod tests {
let e = Event {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501234,
kind: 1,
tags: vec![],
content: "this is a test".to_owned(),
sig: "abcde".to_owned(),
tagidx: None,
};
let c = e.to_canonical();
let expected = Some(r#"[0,"012345",501234,1,[],"this is a test"]"#.to_owned());
assert_eq!(c, expected);
}
#[test]
fn event_tag_select() {
let e = Event {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501234,
kind: 1,
tags: vec![
vec!["j".to_owned(), "abc".to_owned()],
vec!["e".to_owned(), "foo".to_owned()],
vec!["e".to_owned(), "bar".to_owned()],
vec!["e".to_owned(), "baz".to_owned()],
vec![
"p".to_owned(),
"aaaa".to_owned(),
"ws://example.com".to_owned(),
],
],
content: "this is a test".to_owned(),
sig: "abcde".to_owned(),
tagidx: None,
};
let v = e.tag_values_by_name("e");
assert_eq!(v, vec!["foo", "bar", "baz"]);
}
#[test]
fn event_no_tag_select() {
let e = Event {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501234,
kind: 1,
tags: vec![
vec!["j".to_owned(), "abc".to_owned()],
vec!["e".to_owned(), "foo".to_owned()],
vec!["e".to_owned(), "baz".to_owned()],
vec![
"p".to_owned(),
"aaaa".to_owned(),
"ws://example.com".to_owned(),
],
],
content: "this is a test".to_owned(),
sig: "abcde".to_owned(),
tagidx: None,
};
let v = e.tag_values_by_name("x");
// asking for tags that don't exist just returns zero-length vector
assert_eq!(v.len(), 0);
}
#[test]
fn event_canonical_with_tags() {
let e = Event {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501234,
kind: 1,
tags: vec![
@@ -296,6 +490,7 @@ mod tests {
],
content: "this is a test".to_owned(),
sig: "abcde".to_owned(),
tagidx: None,
};
let c = e.to_canonical();
let expected_json = r###"[0,"012345",501234,1,[["#e","aoeu"],["#p","aaaa","ws://example.com"]],"this is a test"]"###;

src/hexrange.rs (new file, 158 lines)

@@ -0,0 +1,158 @@
//! Utilities for searching hexadecimal
use crate::utils::is_hex;
use hex;
/// Types of hexadecimal queries.
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum HexSearch {
// when no range is needed, exact 32-byte
Exact(Vec<u8>),
// lower (inclusive) and upper range (exclusive)
Range(Vec<u8>, Vec<u8>),
// lower bound only, upper bound is MAX inclusive
LowerOnly(Vec<u8>),
}
/// Check if a string contains only f chars
fn is_all_fs(s: &str) -> bool {
s.chars().all(|x| x == 'f' || x == 'F')
}
/// Find the next hex sequence greater than the argument.
pub fn hex_range(s: &str) -> Option<HexSearch> {
// handle special cases
if !is_hex(s) || s.len() > 64 {
return None;
}
if s.len() == 64 {
return Some(HexSearch::Exact(hex::decode(s).ok()?));
}
// if s is odd, add a zero
let mut hash_base = s.to_owned();
let mut odd = hash_base.len() % 2 != 0;
if odd {
// extend the string to make it even
hash_base.push('0');
}
let base = hex::decode(hash_base).ok()?;
// check for all ff's
if is_all_fs(s) {
// there is no higher bound, we only want to search for blobs greater than this.
return Some(HexSearch::LowerOnly(base));
}
// return a range
let mut upper = base.clone();
let mut byte_len = upper.len();
// for odd strings, we made them longer, but we want to increment the upper char (+16).
// we know we can do this without overflowing because we explicitly set the bottom half to 0's.
while byte_len > 0 {
byte_len -= 1;
// check if byte can be incremented, or if we need to carry.
let b = upper[byte_len];
if b == u8::MAX {
// reset and carry
upper[byte_len] = 0;
} else if odd {
// check if first char in this byte is NOT 'f'
if b < 240 {
upper[byte_len] = b + 16; // bump up the first character in this byte
// increment done, stop iterating through the vec
break;
}
// if it is 'f', reset the byte to 0 and do a carry
// reset and carry
upper[byte_len] = 0;
// done with odd logic, so don't repeat this
odd = false;
} else {
// bump up the first character in this byte
upper[byte_len] = b + 1;
// increment done, stop iterating
break;
}
}
Some(HexSearch::Range(base, upper))
}
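// Example (mirroring the tests below): hex_range("abc") yields
// Range(hex "abc0", hex "abd0"), which db.rs turns into a scan of the
// form `event_hash>? AND event_hash<?` to match every hash with that
// prefix.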
#[cfg(test)]
mod tests {
use super::*;
use crate::error::Result;
#[test]
fn hex_range_exact() -> Result<()> {
let hex = "abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00";
let r = hex_range(hex);
assert_eq!(
r,
Some(HexSearch::Exact(hex::decode(hex).expect("invalid hex")))
);
Ok(())
}
#[test]
fn hex_full_range() -> Result<()> {
let hex = "aaaa";
let hex_upper = "aaab";
let r = hex_range(hex);
assert_eq!(
r,
Some(HexSearch::Range(
hex::decode(hex).expect("invalid hex"),
hex::decode(hex_upper).expect("invalid hex")
))
);
Ok(())
}
#[test]
fn hex_full_range_odd() -> Result<()> {
let r = hex_range("abc");
assert_eq!(
r,
Some(HexSearch::Range(
hex::decode("abc0").expect("invalid hex"),
hex::decode("abd0").expect("invalid hex")
))
);
Ok(())
}
#[test]
fn hex_full_range_odd_end_f() -> Result<()> {
let r = hex_range("abf");
assert_eq!(
r,
Some(HexSearch::Range(
hex::decode("abf0").expect("invalid hex"),
hex::decode("ac00").expect("invalid hex")
))
);
Ok(())
}
#[test]
fn hex_no_upper() -> Result<()> {
let r = hex_range("ffff");
assert_eq!(
r,
Some(HexSearch::LowerOnly(
hex::decode("ffff").expect("invalid hex")
))
);
Ok(())
}
#[test]
fn hex_no_upper_odd() -> Result<()> {
let r = hex_range("fff");
assert_eq!(
r,
Some(HexSearch::LowerOnly(
hex::decode("fff0").expect("invalid hex")
))
);
Ok(())
}
}

src/info.rs (new file, 43 lines)

@@ -0,0 +1,43 @@
//! Relay metadata using NIP-11
/// Relay Info
use crate::config;
use serde::{Deserialize, Serialize};
pub const CARGO_PKG_VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
#[derive(Debug, Serialize, Deserialize)]
#[allow(unused)]
pub struct RelayInfo {
#[serde(skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub pubkey: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub contact: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub supported_nips: Option<Vec<i64>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub software: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
}
/// Convert an Info configuration into public Relay Info
impl From<config::Info> for RelayInfo {
fn from(i: config::Info) -> Self {
RelayInfo {
id: i.relay_url,
name: i.name,
description: i.description,
pubkey: i.pubkey,
contact: i.contact,
supported_nips: Some(vec![1, 2, 9, 11, 12, 15, 16, 20, 22, 26]),
software: Some("https://git.sr.ht/~gheartsfield/nostr-rs-relay".to_owned()),
version: CARGO_PKG_VERSION.map(|x| x.to_owned()),
}
}
}
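// A populated RelayInfo serializes to a NIP-11 style JSON document,
// e.g. (hypothetical values; None fields are omitted by serde):
//   {"id":"wss://relay.example.com/","name":"example relay",
//    "supported_nips":[1,2,9,11,12,15,16,20,22,26],
//    "software":"https://git.sr.ht/~gheartsfield/nostr-rs-relay","version":"0.7.5"}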

src/lib.rs

@@ -2,7 +2,15 @@ pub mod close;
pub mod config;
pub mod conn;
pub mod db;
pub mod delegation;
pub mod error;
pub mod event;
pub mod hexrange;
pub mod info;
pub mod nip05;
pub mod notice;
pub mod schema;
pub mod subscription;
pub mod utils;
// Public API for creating relays programmatically
pub mod server;

src/main.rs

@@ -1,248 +1,51 @@
//! Server process
use nostr_rs_relay::config;
use nostr_rs_relay::server::start_server;
use std::env;
use std::sync::mpsc as syncmpsc;
use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
use std::thread;
use tracing::info;
use console_subscriber::ConsoleLayer;
/// Return a requested DB name from command line arguments.
fn db_from_args(args: &[String]) -> Option<String> {
if args.len() == 3 && args.get(1) == Some(&"--db".to_owned()) {
return args.get(2).map(std::clone::Clone::clone);
}
None
}
/// Start running a Nostr relay server.
fn main() {
// setup tracing
let _trace_sub = tracing_subscriber::fmt::try_init();
info!("Starting up from main");
// get database directory from args
let args: Vec<String> = env::args().collect();
let db_dir: Option<String> = db_from_args(&args);
// configure settings, replacing defaults with those read from config.toml
let mut settings = config::Settings::new();
if settings.diagnostics.tracing {
// enable tracing with tokio-console
ConsoleLayer::builder().with_default_env().init();
}
// update with database location
if let Some(db) = db_dir {
settings.database.data_directory = db;
}
let (_, ctrl_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
// run this in a new thread
let handle = thread::spawn(|| {
// we should have a 'control plane' channel to monitor and bump the server.
// this will let us do stuff like clear the database, shutdown, etc.
let _svr = start_server(settings, ctrl_rx);
});
// block on nostr thread to finish.
handle.join().unwrap();
}
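// Usage sketch (hypothetical paths): `nostr-rs-relay --db /var/lib/nostr`
// overrides the SQLite data directory; with no arguments, the
// data_directory from config.toml is used instead.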

src/nip05.rs (new file, 824 lines)

@@ -0,0 +1,824 @@
//! User verification using NIP-05 names
//!
//! NIP-05 defines a mechanism for authors to associate an internet
//! address with their public key, in metadata events. This module
//! consumes a stream of metadata events, and keeps a database table
//! updated with the current NIP-05 verification status.
use crate::config::VerifiedUsers;
use crate::db;
use crate::error::{Error, Result};
use crate::event::Event;
use crate::utils::unix_time;
use hyper::body::HttpBody;
use hyper::client::connect::HttpConnector;
use hyper::Client;
use hyper_tls::HttpsConnector;
use rand::Rng;
use rusqlite::params;
use std::time::Duration;
use std::time::Instant;
use std::time::SystemTime;
use tokio::time::Interval;
use tracing::{debug, info, warn};
/// NIP-05 verifier state
pub struct Verifier {
/// Metadata events for us to inspect
metadata_rx: tokio::sync::broadcast::Receiver<Event>,
/// Newly validated events get written and then broadcast on this channel to subscribers
event_tx: tokio::sync::broadcast::Sender<Event>,
/// SQLite read query pool
read_pool: db::SqlitePool,
/// SQLite write query pool
write_pool: db::SqlitePool,
/// Settings
settings: crate::config::Settings,
/// HTTP client
client: hyper::Client<HttpsConnector<HttpConnector>, hyper::Body>,
/// After all accounts are updated, wait this long before checking again.
wait_after_finish: Duration,
/// Minimum amount of time between HTTP queries
http_wait_duration: Duration,
/// Interval for updating verification records
reverify_interval: Interval,
}
/// A NIP-05 identifier is a local part and domain.
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct Nip05Name {
local: String,
domain: String,
}
impl Nip05Name {
/// Does this name represent the entire domain?
pub fn is_domain_only(&self) -> bool {
self.local == "_"
}
/// Determine the URL to query for verification
fn to_url(&self) -> Option<http::Uri> {
format!(
"https://{}/.well-known/nostr.json?name={}",
self.domain, self.local
)
.parse::<http::Uri>()
.ok()
}
}
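// e.g. (hypothetical name) "bob@example.com" maps to
// https://example.com/.well-known/nostr.json?name=bob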
// Parsing Nip05Names from strings
impl std::convert::TryFrom<&str> for Nip05Name {
type Error = Error;
fn try_from(inet: &str) -> Result<Self, Self::Error> {
// break full name at the @ boundary.
let components: Vec<&str> = inet.split('@').collect();
if components.len() != 2 {
Err(Error::CustomError("too many/few components".to_owned()))
} else {
// check if local name is valid
let local = components[0];
let domain = components[1];
if local
.chars()
.all(|x| x.is_alphanumeric() || x == '_' || x == '-' || x == '.')
{
if domain
.chars()
.all(|x| x.is_alphanumeric() || x == '-' || x == '.')
{
Ok(Nip05Name {
local: local.to_owned(),
domain: domain.to_owned(),
})
} else {
Err(Error::CustomError(
"invalid character in domain part".to_owned(),
))
}
} else {
Err(Error::CustomError(
"invalid character in local part".to_owned(),
))
}
}
}
}
impl std::fmt::Display for Nip05Name {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}@{}", self.local, self.domain)
}
}
// Current time, with a slight forward jitter in seconds
fn now_jitter(sec: u64) -> u64 {
// random time between now and `sec` seconds in the future.
let mut rng = rand::thread_rng();
let jitter_amount = rng.gen_range(0..sec);
let now = unix_time();
now.saturating_add(jitter_amount)
}
/// Check if the specified username and address are present and match in this response body
fn body_contains_user(username: &str, address: &str, bytes: hyper::body::Bytes) -> Result<bool> {
// convert the body into json
let body: serde_json::Value = serde_json::from_slice(&bytes)?;
// ensure we have a names object.
let names_map = body
.as_object()
.and_then(|x| x.get("names"))
.and_then(|x| x.as_object())
.ok_or_else(|| Error::CustomError("not a map".to_owned()))?;
// get the pubkey for the requested user
let check_name = names_map.get(username).and_then(|x| x.as_str());
// ensure the address is a match
Ok(check_name.map(|x| x == address).unwrap_or(false))
}
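// The document fetched for verification is expected to have the NIP-05
// nostr.json shape, e.g. (hypothetical values):
//   {"names": {"bob": "<64-char-pubkey-hex>"}}
// which is what body_contains_user() above checks (names[username] == address).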
impl Verifier {
pub fn new(
metadata_rx: tokio::sync::broadcast::Receiver<Event>,
event_tx: tokio::sync::broadcast::Sender<Event>,
settings: crate::config::Settings,
) -> Result<Self> {
info!("creating NIP-05 verifier");
// build a database connection for reading and writing.
let write_pool = db::build_pool(
"nip05 writer",
&settings,
rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE,
1, // min conns
4, // max conns
true, // wait for DB
);
let read_pool = db::build_pool(
"nip05 reader",
&settings,
rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY,
1, // min conns
8, // max conns
true, // wait for DB
);
// setup hyper client
let https = HttpsConnector::new();
let client = Client::builder().build::<_, hyper::Body>(https);
// After all accounts have been re-verified, don't check again
// for this long.
let wait_after_finish = Duration::from_secs(60 * 10);
// when we have an active queue of accounts to validate, we
// will wait this duration between HTTP requests.
let http_wait_duration = Duration::from_secs(1);
// setup initial interval for re-verification. If we find
// there is no work to be done, it will be reset to a longer
// duration.
let reverify_interval = tokio::time::interval(http_wait_duration);
Ok(Verifier {
metadata_rx,
event_tx,
read_pool,
write_pool,
settings,
client,
wait_after_finish,
http_wait_duration,
reverify_interval,
})
}
/// Perform web verification against a NIP-05 name and address.
pub async fn get_web_verification(
&mut self,
nip: &Nip05Name,
pubkey: &str,
) -> UserWebVerificationStatus {
self.get_web_verification_res(nip, pubkey)
.await
.unwrap_or(UserWebVerificationStatus::Unknown)
}
/// Perform web verification against an `Event` (must be metadata).
pub async fn get_web_verification_from_event(
&mut self,
e: &Event,
) -> UserWebVerificationStatus {
let nip_parse = e.get_nip05_addr();
if let Some(nip) = nip_parse {
self.get_web_verification_res(&nip, &e.pubkey)
.await
.unwrap_or(UserWebVerificationStatus::Unknown)
} else {
UserWebVerificationStatus::Unknown
}
}
/// Perform web verification, with a `Result` return.
async fn get_web_verification_res(
&mut self,
nip: &Nip05Name,
pubkey: &str,
) -> Result<UserWebVerificationStatus> {
// determine if this domain should be checked
if !is_domain_allowed(
&nip.domain,
&self.settings.verified_users.domain_whitelist,
&self.settings.verified_users.domain_blacklist,
) {
return Ok(UserWebVerificationStatus::DomainNotAllowed);
}
let url = nip
.to_url()
.ok_or_else(|| Error::CustomError("invalid NIP-05 URL".to_owned()))?;
let req = hyper::Request::builder()
.method(hyper::Method::GET)
.uri(url)
.header("Accept", "application/json")
.header(
"User-Agent",
format!(
"nostr-rs-relay/{} NIP-05 Verifier",
crate::info::CARGO_PKG_VERSION.unwrap()
),
)
.body(hyper::Body::empty())
.expect("request builder");
let response_fut = self.client.request(req);
// HTTP request with timeout
match tokio::time::timeout(Duration::from_secs(5), response_fut).await {
Ok(response_res) => {
// limit size of verification document to 1MB.
const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024 * 1024;
let response = response_res?;
// determine content length from response
let response_content_length = match response.body().size_hint().upper() {
Some(v) => v,
None => MAX_ALLOWED_RESPONSE_SIZE + 1, // reject missing content length
};
// TODO: test how hyper handles the client providing an inaccurate content-length.
if response_content_length <= MAX_ALLOWED_RESPONSE_SIZE {
let (parts, body) = response.into_parts();
// TODO: consider redirects
if parts.status == http::StatusCode::OK {
// parse body, determine if the username / key / address is present
let body_bytes = hyper::body::to_bytes(body).await?;
let body_matches = body_contains_user(&nip.local, pubkey, body_bytes)?;
if body_matches {
return Ok(UserWebVerificationStatus::Verified);
}
// successful response, parsed as a nip-05
// document, but this name/pubkey was not
// present.
return Ok(UserWebVerificationStatus::Unverified);
}
} else {
info!(
"content length missing or exceeded limits for account: {:?}",
nip.to_string()
);
}
}
Err(_) => {
info!("timeout verifying account {:?}", nip);
return Ok(UserWebVerificationStatus::Unknown);
}
}
Ok(UserWebVerificationStatus::Unknown)
}
/// Perform NIP-05 verifier tasks.
pub async fn run(&mut self) {
// use this to schedule periodic re-validation tasks
// run a loop, restarting on failure
loop {
let res = self.run_internal().await;
if let Err(e) = res {
info!("error in verifier: {:?}", e);
}
}
}
/// Internal select loop for performing verification
async fn run_internal(&mut self) -> Result<()> {
tokio::select! {
m = self.metadata_rx.recv() => {
match m {
Ok(e) => {
if let Some(naddr) = e.get_nip05_addr() {
info!("got metadata event for ({:?},{:?})", naddr.to_string() ,e.get_author_prefix());
// Process a new author, checking if they are verified:
let check_verified = get_latest_user_verification(self.read_pool.get().expect("could not get connection"), &e.pubkey).await;
// ensure the event we got is more recent than the one we have, otherwise we can ignore it.
if let Ok(last_check) = check_verified {
if e.created_at <= last_check.event_created {
// this metadata is from the same author as an existing verification.
// it is older than what we have, so we can ignore it.
debug!("received older metadata event for author {:?}", e.get_author_prefix());
return Ok(());
}
}
// old, or no existing record for this user. In either case, we just create a new one.
let start = Instant::now();
let v = self.get_web_verification_from_event(&e).await;
info!(
"checked name {:?}, result: {:?}, in: {:?}",
naddr.to_string(),
v,
start.elapsed()
);
// sleep to limit how frequently we make HTTP requests for new metadata events. This should limit us to 4 req/sec.
tokio::time::sleep(Duration::from_millis(250)).await;
// if this user was verified, we need to write the
// record, persist the event, and broadcast.
if let UserWebVerificationStatus::Verified = v {
self.create_new_verified_user(&naddr.to_string(), &e).await?;
}
}
},
Err(tokio::sync::broadcast::error::RecvError::Lagged(c)) => {
warn!("incoming metadata events overwhelmed buffer, {} events dropped",c);
}
Err(tokio::sync::broadcast::error::RecvError::Closed) => {
info!("metadata broadcast channel closed");
}
}
},
_ = self.reverify_interval.tick() => {
// check and see if there is an old account that needs
// to be reverified
self.do_reverify().await?;
},
}
Ok(())
}
/// Reverify the oldest user verification record.
async fn do_reverify(&mut self) -> Result<()> {
let reverify_setting = self
.settings
.verified_users
.verify_update_frequency_duration;
let max_failures = self.settings.verified_users.max_consecutive_failures;
// get from settings, but default to 6hrs between re-checking an account
let reverify_dur = reverify_setting.unwrap_or_else(|| Duration::from_secs(60 * 60 * 6));
// find all verification records that have success or failure OLDER than the reverify_dur.
let now = SystemTime::now();
let earliest = now - reverify_dur;
let earliest_epoch = earliest
.duration_since(SystemTime::UNIX_EPOCH)
.map(|x| x.as_secs())
.unwrap_or(0);
let vr = get_oldest_user_verification(self.read_pool.get()?, earliest_epoch).await;
match vr {
Ok(ref v) => {
let new_status = self.get_web_verification(&v.name, &v.address).await;
match new_status {
UserWebVerificationStatus::Verified => {
// freshly verified account, update the
// timestamp.
self.update_verification_record(self.write_pool.get()?, v)
.await?;
}
UserWebVerificationStatus::DomainNotAllowed
| UserWebVerificationStatus::Unknown => {
// server may be offline, or temporarily
// blocked by the config file. Note the
// failure so we can process something
// else.
// have we had enough failures to give up?
if v.failure_count >= max_failures as u64 {
info!(
"giving up on verifying {:?} after {} failures",
v.name, v.failure_count
);
self.delete_verification_record(self.write_pool.get()?, v)
.await?;
} else {
// record normal failure, incrementing failure count
self.fail_verification_record(self.write_pool.get()?, v)
.await?;
}
}
UserWebVerificationStatus::Unverified => {
// domain has removed the verification, drop
// the record on our side.
self.delete_verification_record(self.write_pool.get()?, v)
.await?;
}
}
}
Err(Error::SqlError(rusqlite::Error::QueryReturnedNoRows)) => {
// No users need verification. Reset the interval to
// the next verification attempt.
let start = tokio::time::Instant::now() + self.wait_after_finish;
self.reverify_interval = tokio::time::interval_at(start, self.http_wait_duration);
}
Err(ref e) => {
warn!(
"Error when checking for NIP-05 verification records: {:?}",
e
);
}
}
Ok(())
}
/// Reset the verification timestamp on a VerificationRecord
pub async fn update_verification_record(
&mut self,
mut conn: db::PooledConnection,
vr: &VerificationRecord,
) -> Result<()> {
let vr_id = vr.rowid;
let vr_str = vr.to_string();
tokio::task::spawn_blocking(move || {
// add some jitter to the verification to prevent everything from stacking up together.
let verif_time = now_jitter(600);
let tx = conn.transaction()?;
{
// update verification time and reset any failure count
let query =
"UPDATE user_verification SET verified_at=?, failure_count=0 WHERE id=?";
let mut stmt = tx.prepare(query)?;
stmt.execute(params![verif_time, vr_id])?;
}
tx.commit()?;
info!("verification updated for {}", vr_str);
let ok: Result<()> = Ok(());
ok
})
.await?
}
/// Record a failed verification attempt on a VerificationRecord, incrementing its failure count
pub async fn fail_verification_record(
&mut self,
mut conn: db::PooledConnection,
vr: &VerificationRecord,
) -> Result<()> {
let vr_id = vr.rowid;
let vr_str = vr.to_string();
let fail_count = vr.failure_count.saturating_add(1);
tokio::task::spawn_blocking(move || {
// add some jitter to the failure timestamp to prevent everything from stacking up together.
let fail_time = now_jitter(600);
let tx = conn.transaction()?;
{
let query = "UPDATE user_verification SET failed_at=?, failure_count=? WHERE id=?";
let mut stmt = tx.prepare(query)?;
stmt.execute(params![fail_time, fail_count, vr_id])?;
}
tx.commit()?;
info!("verification failed for {}", vr_str);
let ok: Result<()> = Ok(());
ok
})
.await?
}
/// Delete a VerificationRecord that is no longer valid
pub async fn delete_verification_record(
&mut self,
mut conn: db::PooledConnection,
vr: &VerificationRecord,
) -> Result<()> {
let vr_id = vr.rowid;
let vr_str = vr.to_string();
tokio::task::spawn_blocking(move || {
let tx = conn.transaction()?;
{
let query = "DELETE FROM user_verification WHERE id=?;";
let mut stmt = tx.prepare(query)?;
stmt.execute(params![vr_id])?;
}
tx.commit()?;
info!("verification rescinded for {}", vr_str);
let ok: Result<()> = Ok(());
ok
})
.await?
}
/// Persist an event, create a verification record, and broadcast.
// TODO: have more event-writing logic handled in the db module.
// Right now, these events avoid the rate limit. That is
// acceptable since as soon as the user is registered, this path
// is no longer used.
// TODO: refactor these into spawn_blocking
// calls to get them off the async executors.
async fn create_new_verified_user(&mut self, name: &str, event: &Event) -> Result<()> {
let start = Instant::now();
// we should only do this if we are enabled. if we are
// disabled/passive, the event has already been persisted.
let should_write_event = self.settings.verified_users.is_enabled();
if should_write_event {
match db::write_event(&mut self.write_pool.get()?, event) {
Ok(updated) => {
if updated != 0 {
info!(
"persisted event: {:?} in {:?}",
event.get_event_id_prefix(),
start.elapsed()
);
self.event_tx.send(event.clone()).ok();
}
}
Err(err) => {
warn!("event insert failed: {:?}", err);
if let Error::SqlError(r) = err {
warn!("because: : {:?}", r);
}
}
}
}
// write the verification record
save_verification_record(self.write_pool.get()?, event, name).await?;
Ok(())
}
}
/// Result of checking user's verification status against DNS/HTTP.
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum UserWebVerificationStatus {
Verified, // user is verified, as of now.
DomainNotAllowed, // domain blacklist or whitelist denied us from attempting a verification
Unknown, // user's status could not be determined (timeout, server error)
Unverified, // user's status is not verified (successful check, name / addr do not match)
}
/// A NIP-05 verification record.
#[derive(PartialEq, Eq, Debug, Clone)]
// Basic information for a verification event. Gives us all we need to assert a NIP-05 address is good.
pub struct VerificationRecord {
pub rowid: u64, // database row for this verification event
pub name: Nip05Name, // address being verified
pub address: String, // pubkey
pub event: String, // event ID hash providing the verification
pub event_created: u64, // when the metadata event was published
pub last_success: Option<u64>, // the most recent time a verification was provided. None if verification under this name has never succeeded.
pub last_failure: Option<u64>, // the most recent time verification was attempted, but could not be completed.
pub failure_count: u64, // how many consecutive failures have been observed.
}
/// Check with settings to determine if a given domain is allowed to
/// publish.
pub fn is_domain_allowed(
domain: &str,
whitelist: &Option<Vec<String>>,
blacklist: &Option<Vec<String>>,
) -> bool {
// if there is a whitelist, domain must be present in it.
if let Some(wl) = whitelist {
// workaround for Vec contains not accepting &str
return wl.iter().any(|x| x == domain);
}
// otherwise, check that the domain is not in the blacklist
if let Some(bl) = blacklist {
return !bl.iter().any(|x| x == domain);
}
true
}
impl VerificationRecord {
/// Check if the record is recent enough to be considered valid,
/// and the domain is allowed.
pub fn is_valid(&self, verified_users_settings: &VerifiedUsers) -> bool {
//let settings = SETTINGS.read().unwrap();
// how long a verification record is good for
let nip05_expiration = &verified_users_settings.verify_expiration_duration;
if let Some(e) = nip05_expiration {
if !self.is_current(e) {
return false;
}
}
// check domains
is_domain_allowed(
&self.name.domain,
&verified_users_settings.domain_whitelist,
&verified_users_settings.domain_blacklist,
)
}
/// Check if this record has been validated within the given
/// duration.
fn is_current(&self, d: &Duration) -> bool {
match self.last_success {
Some(s) => {
// current time - duration
let now = SystemTime::now();
let cutoff = now - *d;
let cutoff_epoch = cutoff
.duration_since(SystemTime::UNIX_EPOCH)
.map(|x| x.as_secs())
.unwrap_or(0);
s > cutoff_epoch
}
None => false,
}
}
}
impl std::fmt::Display for VerificationRecord {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"({:?},{:?})",
self.name.to_string(),
self.address.chars().take(8).collect::<String>()
)
}
}
/// Create a new verification record based on an event
pub async fn save_verification_record(
mut conn: db::PooledConnection,
event: &Event,
name: &str,
) -> Result<()> {
let e = hex::decode(&event.id).ok();
let n = name.to_owned();
let a_prefix = event.get_author_prefix();
tokio::task::spawn_blocking(move || {
let tx = conn.transaction()?;
{
// if we create a /new/ one, we should get rid of any old ones. or group the new ones by name and only consider the latest.
let query = "INSERT INTO user_verification (metadata_event, name, verified_at) VALUES ((SELECT id from event WHERE event_hash=?), ?, strftime('%s','now'));";
let mut stmt = tx.prepare(query)?;
stmt.execute(params![e, n])?;
// get the row ID
let v_id = tx.last_insert_rowid();
// delete everything else by this name
let del_query = "DELETE FROM user_verification WHERE name = ? AND id != ?;";
let mut del_stmt = tx.prepare(del_query)?;
let count = del_stmt.execute(params![n,v_id])?;
if count > 0 {
info!("removed {} old verification records for ({:?},{:?})", count, n, a_prefix);
}
}
tx.commit()?;
info!("saved new verification record for ({:?},{:?})", n, a_prefix);
let ok: Result<()> = Ok(());
ok
}).await?
}
/// Retrieve the most recent verification record for a given pubkey (async).
pub async fn get_latest_user_verification(
conn: db::PooledConnection,
pubkey: &str,
) -> Result<VerificationRecord> {
let p = pubkey.to_owned();
tokio::task::spawn_blocking(move || query_latest_user_verification(conn, p)).await?
}
/// Query database for the latest verification record for a given pubkey.
pub fn query_latest_user_verification(
mut conn: db::PooledConnection,
pubkey: String,
) -> Result<VerificationRecord> {
let tx = conn.transaction()?;
let query = "SELECT v.id, v.name, e.event_hash, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v LEFT JOIN event e ON e.id=v.metadata_event WHERE e.author=? ORDER BY e.created_at DESC, v.verified_at DESC, v.failed_at DESC LIMIT 1;";
let mut stmt = tx.prepare_cached(query)?;
let fields = stmt.query_row(params![hex::decode(&pubkey).ok()], |r| {
let rowid: u64 = r.get(0)?;
let rowname: String = r.get(1)?;
let eventid: Vec<u8> = r.get(2)?;
let created_at: u64 = r.get(3)?;
// create a tuple since we can't throw non-rusqlite errors in this closure
Ok((
rowid,
rowname,
eventid,
created_at,
r.get(4).ok(),
r.get(5).ok(),
r.get(6)?,
))
})?;
Ok(VerificationRecord {
rowid: fields.0,
name: Nip05Name::try_from(&fields.1[..])?,
address: pubkey,
event: hex::encode(fields.2),
event_created: fields.3,
last_success: fields.4,
last_failure: fields.5,
failure_count: fields.6,
})
}
/// Retrieve the oldest user verification (async)
pub async fn get_oldest_user_verification(
conn: db::PooledConnection,
earliest: u64,
) -> Result<VerificationRecord> {
tokio::task::spawn_blocking(move || query_oldest_user_verification(conn, earliest)).await?
}
pub fn query_oldest_user_verification(
mut conn: db::PooledConnection,
earliest: u64,
) -> Result<VerificationRecord> {
let tx = conn.transaction()?;
let query = "SELECT v.id, v.name, e.event_hash, e.author, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v LEFT JOIN event e ON e.id=v.metadata_event WHERE (v.verified_at < ? OR v.verified_at IS NULL) AND (v.failed_at < ? OR v.failed_at IS NULL) ORDER BY v.verified_at ASC, v.failed_at ASC LIMIT 1;";
let mut stmt = tx.prepare_cached(query)?;
let fields = stmt.query_row(params![earliest, earliest], |r| {
let rowid: u64 = r.get(0)?;
let rowname: String = r.get(1)?;
let eventid: Vec<u8> = r.get(2)?;
let pubkey: Vec<u8> = r.get(3)?;
let created_at: u64 = r.get(4)?;
// create a tuple since we can't throw non-rusqlite errors in this closure
Ok((
rowid,
rowname,
eventid,
pubkey,
created_at,
r.get(5).ok(),
r.get(6).ok(),
r.get(7)?,
))
})?;
let vr = VerificationRecord {
rowid: fields.0,
name: Nip05Name::try_from(&fields.1[..])?,
address: hex::encode(fields.3),
event: hex::encode(fields.2),
event_created: fields.4,
last_success: fields.5,
last_failure: fields.6,
failure_count: fields.7,
};
Ok(vr)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn local_from_inet() {
let addr = "bob@example.com";
let parsed = Nip05Name::try_from(addr);
assert!(parsed.is_ok());
let v = parsed.unwrap();
assert_eq!(v.local, "bob");
assert_eq!(v.domain, "example.com");
}
#[test]
fn not_enough_sep() {
let addr = "bob_example.com";
let parsed = Nip05Name::try_from(addr);
assert!(parsed.is_err());
}
#[test]
fn too_many_sep() {
let addr = "foo@bob@example.com";
let parsed = Nip05Name::try_from(addr);
assert!(parsed.is_err());
}
#[test]
fn invalid_local_name() {
// non-permitted ascii chars
assert!(Nip05Name::try_from("foo!@example.com").is_err());
assert!(Nip05Name::try_from("foo @example.com").is_err());
assert!(Nip05Name::try_from(" foo@example.com").is_err());
assert!(Nip05Name::try_from("f oo@example.com").is_err());
assert!(Nip05Name::try_from("foo<@example.com").is_err());
// unicode dash (U+2010; the non-ASCII character appears to have been lost in extraction)
assert!(Nip05Name::try_from("foo‐bar@example.com").is_err());
// emoji
assert!(Nip05Name::try_from("foo😭bar@example.com").is_err());
}
#[test]
fn invalid_domain_name() {
// non-permitted ascii chars
assert!(Nip05Name::try_from("foo@examp!e.com").is_err());
assert!(Nip05Name::try_from("foo@ example.com").is_err());
assert!(Nip05Name::try_from("foo@exa mple.com").is_err());
assert!(Nip05Name::try_from("foo@example .com").is_err());
assert!(Nip05Name::try_from("foo@exa<mple.com").is_err());
// unicode dash (U+2010; the non-ASCII character appears to have been lost in extraction)
assert!(Nip05Name::try_from("foobar@ex‐ample.com").is_err());
// emoji
assert!(Nip05Name::try_from("foobar@ex😭ample.com").is_err());
}
#[test]
fn to_url() {
let nip = Nip05Name::try_from("foobar@example.com").unwrap();
assert_eq!(
nip.to_url(),
Some(
"https://example.com/.well-known/nostr.json?name=foobar"
.parse()
.unwrap()
)
);
}
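    // Added illustrative test (not part of the original diff): documents
    // the precedence rules of is_domain_allowed above. A whitelist, when
    // present, is authoritative; the blacklist is only consulted otherwise.
    #[test]
    fn domain_allow_precedence() {
        let wl = Some(vec!["example.com".to_owned()]);
        let bl = Some(vec!["spam.example".to_owned()]);
        // whitelist present: only listed domains pass
        assert!(is_domain_allowed("example.com", &wl, &bl));
        assert!(!is_domain_allowed("other.com", &wl, &bl));
        // no whitelist: anything not blacklisted passes
        assert!(!is_domain_allowed("spam.example", &None, &bl));
        assert!(is_domain_allowed("example.com", &None, &bl));
        // neither list configured: everything passes
        assert!(is_domain_allowed("anything.example", &None, &None));
    }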
}

src/notice.rs Normal file
@@ -0,0 +1,86 @@
pub enum EventResultStatus {
Saved,
Duplicate,
Invalid,
Blocked,
RateLimited,
Error,
}
pub struct EventResult {
pub id: String,
pub msg: String,
pub status: EventResultStatus,
}
pub enum Notice {
Message(String),
EventResult(EventResult),
}
impl EventResultStatus {
pub fn to_bool(&self) -> bool {
match self {
Self::Saved => true,
Self::Duplicate => true,
Self::Invalid => false,
Self::Blocked => false,
Self::RateLimited => false,
Self::Error => false,
}
}
pub fn prefix(&self) -> &'static str {
match self {
Self::Saved => "saved",
Self::Duplicate => "duplicate",
Self::Invalid => "invalid",
Self::Blocked => "blocked",
Self::RateLimited => "rate-limited",
Self::Error => "error",
}
}
}
impl Notice {
//pub fn err(err: error::Error, id: String) -> Notice {
// Notice::err_msg(format!("{}", err), id)
//}
pub fn message(msg: String) -> Notice {
Notice::Message(msg)
}
fn prefixed(id: String, msg: &str, status: EventResultStatus) -> Notice {
let msg = format!("{}: {}", status.prefix(), msg);
Notice::EventResult(EventResult { id, msg, status })
}
pub fn invalid(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::Invalid)
}
pub fn blocked(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::Blocked)
}
pub fn rate_limited(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::RateLimited)
}
pub fn duplicate(id: String) -> Notice {
Notice::prefixed(id, "", EventResultStatus::Duplicate)
}
pub fn error(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::Error)
}
pub fn saved(id: String) -> Notice {
Notice::EventResult(EventResult {
id,
msg: "".into(),
status: EventResultStatus::Saved,
})
}
}
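// Usage sketch (an illustrative addition, not part of the diff): the
// constructors above produce a NIP-20 style prefixed message, and
// to_bool() supplies the accepted/rejected flag for the eventual
// ["OK", ...] response (see make_notice_message in src/server.rs).
#[cfg(test)]
mod notice_sketch {
    use super::*;
    #[test]
    fn rate_limited_notice() {
        let n = Notice::rate_limited("abcd1234".into(), "slow down");
        if let Notice::EventResult(res) = n {
            assert_eq!(res.msg, "rate-limited: slow down");
            assert!(!res.status.to_bool()); // event was not persisted
        } else {
            panic!("expected an EventResult");
        }
    }
}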

@@ -1,122 +0,0 @@
//! Nostr protocol layered over WebSocket
use crate::close::CloseCmd;
use crate::error::{Error, Result};
use crate::event::EventCmd;
use crate::subscription::Subscription;
use core::pin::Pin;
use futures::sink::Sink;
use futures::stream::Stream;
use futures::task::Context;
use futures::task::Poll;
use log::*;
use serde::{Deserialize, Serialize};
use tokio::net::TcpStream;
use tokio_tungstenite::WebSocketStream;
use tungstenite::error::Error as WsError;
use tungstenite::protocol::Message;
/// Nostr protocol messages from a client
#[derive(Deserialize, Serialize, Clone, PartialEq, Debug)]
#[serde(untagged)]
pub enum NostrMessage {
/// An `EVENT` message
EventMsg(EventCmd),
/// A `REQ` message
SubMsg(Subscription),
/// A `CLOSE` message
CloseMsg(CloseCmd),
}
/// Nostr protocol messages from a relay/server
#[derive(Deserialize, Serialize, Clone, PartialEq, Debug)]
pub enum NostrResponse {
/// A `NOTICE` response
NoticeRes(String),
/// An `EVENT` response, composed of the subscription identifier,
/// and serialized event JSON
EventRes(String, String),
}
/// A Nostr protocol stream is layered on top of a Websocket stream.
pub struct NostrStream {
ws_stream: WebSocketStream<TcpStream>,
}
/// Given a websocket, return a protocol stream wrapper.
pub fn wrap_ws_in_nostr(ws: WebSocketStream<TcpStream>) -> NostrStream {
NostrStream { ws_stream: ws }
}
/// Implement the [`Stream`] interface to produce Nostr messages.
impl Stream for NostrStream {
type Item = Result<NostrMessage>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
/// Convert Message to NostrMessage
fn convert(msg: String) -> Result<NostrMessage> {
debug!("raw msg: {}", msg);
let event_size = msg.len();
debug!("event size is {} bytes", event_size);
let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
match parsed_res {
Ok(m) => Ok(m),
Err(e) => {
debug!("proto parse error: {:?}", e);
Err(Error::ProtoParseError)
}
}
}
match Pin::new(&mut self.ws_stream).poll_next(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(None) => Poll::Ready(None),
Poll::Ready(Some(v)) => match v {
Ok(Message::Text(vs)) => Poll::Ready(Some(convert(vs))),
Ok(Message::Binary(_)) => Poll::Ready(Some(Err(Error::ProtoParseError))),
Ok(Message::Pong(_)) | Ok(Message::Ping(_)) => Poll::Pending,
Ok(Message::Close(_)) => Poll::Ready(None),
Err(WsError::AlreadyClosed) | Err(WsError::ConnectionClosed) => Poll::Ready(None),
Err(_) => Poll::Ready(Some(Err(Error::ConnError))),
},
}
}
}
/// Implement the [`Sink`] interface to produce Nostr responses.
impl Sink<NostrResponse> for NostrStream {
type Error = Error;
fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
// map the error type
match Pin::new(&mut self.ws_stream).poll_ready(cx) {
Poll::Ready(Ok(())) => Poll::Ready(Ok(())),
Poll::Ready(Err(_)) => Poll::Ready(Err(Error::ConnWriteError)),
Poll::Pending => Poll::Pending,
}
}
fn start_send(mut self: Pin<&mut Self>, item: NostrResponse) -> Result<(), Self::Error> {
// TODO: do real escaping for these - at least on NOTICE,
// which surely has some problems if arbitrary text is sent.
let send_str = match item {
NostrResponse::NoticeRes(msg) => {
let s = msg.replace("\"", "");
format!("[\"NOTICE\",\"{}\"]", s)
}
NostrResponse::EventRes(sub, eventstr) => {
let subesc = sub.replace("\"", "");
format!("[\"EVENT\",\"{}\",{}]", subesc, eventstr)
}
};
match Pin::new(&mut self.ws_stream).start_send(Message::Text(send_str)) {
Ok(()) => Ok(()),
Err(_) => Err(Error::ConnWriteError),
}
}
fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
}

src/schema.rs Normal file
@@ -0,0 +1,423 @@
//! Database schema and migrations
use crate::db::PooledConnection;
use crate::error::Result;
use crate::event::{single_char_tagname, Event};
use crate::utils::is_lower_hex;
use const_format::formatcp;
use rusqlite::limits::Limit;
use rusqlite::params;
use rusqlite::Connection;
use std::cmp::Ordering;
use std::time::Instant;
use tracing::{debug, error, info};
/// Startup DB Pragmas
pub const STARTUP_SQL: &str = r##"
PRAGMA main.synchronous=NORMAL;
PRAGMA foreign_keys = ON;
PRAGMA journal_size_limit=32768;
pragma mmap_size = 536870912; -- 512MB of mmap
"##;
/// Latest database version
pub const DB_VERSION: usize = 9;
/// Schema definition
const INIT_SQL: &str = formatcp!(
r##"
-- Database settings
PRAGMA encoding = "UTF-8";
PRAGMA journal_mode=WAL;
PRAGMA main.synchronous=NORMAL;
PRAGMA foreign_keys = ON;
PRAGMA application_id = 1654008667;
PRAGMA user_version = {};
-- Event Table
CREATE TABLE IF NOT EXISTS event (
id INTEGER PRIMARY KEY,
event_hash BLOB NOT NULL, -- 32-byte SHA-256 hash of the serialized event
first_seen INTEGER NOT NULL, -- when the event was first seen (not authored!) (seconds since 1970)
created_at INTEGER NOT NULL, -- when the event was authored
author BLOB NOT NULL, -- author pubkey
delegated_by BLOB, -- delegator pubkey (NIP-26)
kind INTEGER NOT NULL, -- event kind
hidden INTEGER, -- relevant for queries
content TEXT NOT NULL -- serialized json of event object
);
-- Event Indexes
CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash);
CREATE INDEX IF NOT EXISTS author_index ON event(author);
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
-- Tag Table
-- Tag values are stored as either a BLOB (if they come in as a
-- hex-string), or TEXT otherwise.
-- This means that searches need to select the appropriate column.
CREATE TABLE IF NOT EXISTS tag (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
name TEXT, -- the tag name ("p", "e", whatever)
value TEXT, -- the tag value, if not hex.
value_hex BLOB, -- the tag value, if it can be interpreted as a lowercase hex string.
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
CREATE INDEX IF NOT EXISTS tag_val_hex_index ON tag(value_hex);
-- NIP-05 User Validation
CREATE TABLE IF NOT EXISTS user_verification (
id INTEGER PRIMARY KEY,
metadata_event INTEGER NOT NULL, -- the metadata event used for this validation.
name TEXT NOT NULL, -- the nip05 field value (user@domain).
verified_at INTEGER, -- timestamp this author/nip05 was most recently verified.
failed_at INTEGER, -- timestamp a verification attempt failed (host down).
failure_count INTEGER DEFAULT 0, -- number of consecutive failures.
FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name);
CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);
"##,
DB_VERSION
);
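// Illustrative helper (added for explanation, not in the original source):
// how a tag value is routed to the `value` vs `value_hex` columns described
// in the schema above. A value is stored as a BLOB only when it can be
// restored losslessly, i.e. it is even-length lowercase hex; this mirrors
// the logic used in mig_5_to_6 below.
#[allow(dead_code)] // sketch only
fn tag_value_columns(tagval: &str) -> (Option<String>, Option<Vec<u8>>) {
    if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
        // hex-like: store decoded bytes in value_hex
        (None, hex::decode(tagval).ok())
    } else {
        // anything else: store the raw text in value
        (Some(tagval.to_owned()), None)
    }
}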
/// Determine the current application database schema version.
pub fn curr_db_version(conn: &mut Connection) -> Result<usize> {
let query = "PRAGMA user_version;";
let curr_version = conn.query_row(query, [], |row| row.get(0))?;
Ok(curr_version)
}
fn mig_init(conn: &mut PooledConnection) -> Result<usize> {
match conn.execute_batch(INIT_SQL) {
Ok(()) => {
info!(
"database pragma/schema initialized to v{}, and ready",
DB_VERSION
);
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be initialized");
}
}
Ok(DB_VERSION)
}
/// Upgrade DB to latest version, and execute pragma settings
pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
// check the version.
let mut curr_version = curr_db_version(conn)?;
info!("DB version = {:?}", curr_version);
debug!(
"SQLite max query parameters: {}",
conn.limit(Limit::SQLITE_LIMIT_VARIABLE_NUMBER)
);
debug!(
"SQLite max table/blob/text length: {} MB",
(conn.limit(Limit::SQLITE_LIMIT_LENGTH) as f64 / (1024 * 1024) as f64).floor()
);
debug!(
"SQLite max SQL length: {} MB",
(conn.limit(Limit::SQLITE_LIMIT_SQL_LENGTH) as f64 / (1024 * 1024) as f64).floor()
);
match curr_version.cmp(&DB_VERSION) {
// Database is new or not current
Ordering::Less => {
// initialize from scratch
if curr_version == 0 {
curr_version = mig_init(conn)?;
}
// for initialized but out-of-date schemas, proceed to
// upgrade sequentially until we are current.
if curr_version == 1 {
curr_version = mig_1_to_2(conn)?;
}
if curr_version == 2 {
curr_version = mig_2_to_3(conn)?;
}
if curr_version == 3 {
curr_version = mig_3_to_4(conn)?;
}
if curr_version == 4 {
curr_version = mig_4_to_5(conn)?;
}
if curr_version == 5 {
curr_version = mig_5_to_6(conn)?;
}
if curr_version == 6 {
curr_version = mig_6_to_7(conn)?;
}
if curr_version == 7 {
curr_version = mig_7_to_8(conn)?;
}
if curr_version == 8 {
curr_version = mig_8_to_9(conn)?;
}
if curr_version == DB_VERSION {
info!(
"All migration scripts completed successfully. Welcome to v{}.",
DB_VERSION
);
}
}
// Database is current, all is good
Ordering::Equal => {
debug!("Database version was already current (v{})", DB_VERSION);
}
// Database is newer than what this code understands, abort
Ordering::Greater => {
panic!(
"Database version is newer than supported by this executable (v{} > v{})",
curr_version, DB_VERSION
);
}
}
// Setup PRAGMA
conn.execute_batch(STARTUP_SQL)?;
debug!("SQLite PRAGMA startup completed");
Ok(())
}
//// Migration Scripts
fn mig_1_to_2(conn: &mut PooledConnection) -> Result<usize> {
// only change is adding a hidden column to events.
let upgrade_sql = r##"
ALTER TABLE event ADD hidden INTEGER;
UPDATE event SET hidden=FALSE;
PRAGMA user_version = 2;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v1 -> v2");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(2)
}
fn mig_2_to_3(conn: &mut PooledConnection) -> Result<usize> {
// this version lacks the tag table
info!("database schema needs update from 2->3");
let upgrade_sql = r##"
CREATE TABLE IF NOT EXISTS tag (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
name TEXT, -- the tag name ("p", "e", whatever)
value TEXT, -- the tag value, if not hex.
value_hex BLOB, -- the tag value, if it can be interpreted as a hex string.
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
PRAGMA user_version = 3;
"##;
// TODO: load existing refs into tag table
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v2 -> v3");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
// iterate over every event/pubkey tag
let tx = conn.transaction()?;
{
let mut stmt = tx.prepare("select event_id, \"e\", lower(hex(referenced_event)) from event_ref union select event_id, \"p\", lower(hex(referenced_pubkey)) from pubkey_ref;")?;
let mut tag_rows = stmt.query([])?;
while let Some(row) = tag_rows.next()? {
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
let event_id: u64 = row.get(0)?;
let tag_name: String = row.get(1)?;
let tag_value: String = row.get(2)?;
// this will leave behind p/e tags that were non-hex, but they are invalid anyways.
if is_lower_hex(&tag_value) {
tx.execute(
"INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
params![event_id, tag_name, hex::decode(&tag_value).ok()],
)?;
}
}
}
info!("Updated tag values");
tx.commit()?;
Ok(3)
}
fn mig_3_to_4(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 3->4");
let upgrade_sql = r##"
-- incoming metadata events with nip05
CREATE TABLE IF NOT EXISTS user_verification (
id INTEGER PRIMARY KEY,
metadata_event INTEGER NOT NULL, -- the metadata event used for this validation.
name TEXT NOT NULL, -- the nip05 field value (user@domain).
verified_at INTEGER, -- timestamp this author/nip05 was most recently verified.
failed_at INTEGER, -- timestamp a verification attempt failed (host down).
failure_count INTEGER DEFAULT 0, -- number of consecutive failures.
FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name);
CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);
PRAGMA user_version = 4;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v3 -> v4");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(4)
}
fn mig_4_to_5(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 4->5");
let upgrade_sql = r##"
DROP TABLE IF EXISTS event_ref;
DROP TABLE IF EXISTS pubkey_ref;
PRAGMA user_version=5;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v4 -> v5");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(5)
}
fn mig_5_to_6(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 5->6");
// We need to rebuild the tags table: iterate through the
// event table, build each event from its JSON, and insert its
// tags into a fresh tag table. This was needed due to a logic
// error in how hex-like tags got indexed.
let start = Instant::now();
let tx = conn.transaction()?;
{
// Clear out table
tx.execute("DELETE FROM tag;", [])?;
let mut stmt = tx.prepare("select id, content from event order by id;")?;
let mut tag_rows = stmt.query([])?;
while let Some(row) = tag_rows.next()? {
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
let event_id: u64 = row.get(0)?;
let event_json: String = row.get(1)?;
let event: Event = serde_json::from_str(&event_json)?;
// look at each event, and each tag, creating new tag entries if appropriate.
for t in event.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.get(0).unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
}
// safe because len was > 1
let tagval = t.get(1).unwrap();
// insert as BLOB if we can restore it losslessly.
// this means it needs to be even length and lowercase.
if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
tx.execute(
"INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
params![event_id, tagname, hex::decode(tagval).ok()],
)?;
} else {
// otherwise, insert as text
tx.execute(
"INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
params![event_id, tagname, &tagval],
)?;
}
}
}
tx.execute("PRAGMA user_version = 6;", [])?;
}
tx.commit()?;
info!("database schema upgraded v5 -> v6 in {:?}", start.elapsed());
// vacuum after large table modification
let start = Instant::now();
conn.execute("VACUUM;", [])?;
info!("vacuumed DB after tags rebuild in {:?}", start.elapsed());
Ok(6)
}
fn mig_6_to_7(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 6->7");
// only change is adding a delegated_by column to events.
let upgrade_sql = r##"
ALTER TABLE event ADD delegated_by BLOB;
CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
PRAGMA user_version = 7;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v6 -> v7");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(7)
}
fn mig_7_to_8(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 7->8");
// Remove redundant indexes, and add a better multi-column index.
let upgrade_sql = r##"
DROP INDEX IF EXISTS created_at_index;
DROP INDEX IF EXISTS kind_index;
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
PRAGMA user_version = 8;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v7 -> v8");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(8)
}
fn mig_8_to_9(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 8->9");
// Those old indexes were actually helpful...
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
PRAGMA user_version = 9;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v8 -> v9");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(9)
}

src/server.rs Normal file
@@ -0,0 +1,688 @@
//! Server process
use crate::close::Close;
use crate::close::CloseCmd;
use crate::config::{Settings, VerifiedUsersMode};
use crate::conn;
use crate::db;
use crate::db::SubmittedEvent;
use crate::error::{Error, Result};
use crate::event::Event;
use crate::event::EventCmd;
use crate::info::RelayInfo;
use crate::nip05;
use crate::notice::Notice;
use crate::subscription::Subscription;
use futures::SinkExt;
use futures::StreamExt;
use http::header::HeaderMap;
use hyper::header::ACCEPT;
use hyper::service::{make_service_fn, service_fn};
use hyper::upgrade::Upgraded;
use hyper::{
header, server::conn::AddrStream, upgrade, Body, Request, Response, Server, StatusCode,
};
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::collections::HashMap;
use std::convert::Infallible;
use std::net::SocketAddr;
use std::path::Path;
use std::sync::mpsc::Receiver as MpscReceiver;
use std::time::Duration;
use std::time::Instant;
use tokio::runtime::Builder;
use tokio::sync::broadcast::{self, Receiver, Sender};
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio_tungstenite::WebSocketStream;
use tracing::*;
use tungstenite::error::CapacityError::MessageTooLong;
use tungstenite::error::Error as WsError;
use tungstenite::handshake;
use tungstenite::protocol::Message;
use tungstenite::protocol::WebSocketConfig;
/// Handle arbitrary HTTP requests, including for WebSocket upgrades.
async fn handle_web_request(
mut request: Request<Body>,
pool: db::SqlitePool,
settings: Settings,
remote_addr: SocketAddr,
broadcast: Sender<Event>,
event_tx: tokio::sync::mpsc::Sender<SubmittedEvent>,
shutdown: Receiver<()>,
) -> Result<Response<Body>, Infallible> {
match (
request.uri().path(),
request.headers().contains_key(header::UPGRADE),
) {
// Request for / as websocket
("/", true) => {
trace!("websocket with upgrade request");
//assume request is a handshake, so create the handshake response
let response = match handshake::server::create_response_with_body(&request, || {
Body::empty()
}) {
Ok(response) => {
//in case the handshake response creation succeeds,
//spawn a task to handle the websocket connection
tokio::spawn(async move {
//using the hyper feature of upgrading a connection
match upgrade::on(&mut request).await {
//if successfully upgraded
Ok(upgraded) => {
// set WebSocket configuration options
let config = WebSocketConfig {
max_message_size: settings.limits.max_ws_message_bytes,
max_frame_size: settings.limits.max_ws_frame_bytes,
..Default::default()
};
//create a websocket stream from the upgraded object
let ws_stream = WebSocketStream::from_raw_socket(
//pass the upgraded object
//as the base layer stream of the Websocket
upgraded,
tokio_tungstenite::tungstenite::protocol::Role::Server,
Some(config),
)
.await;
let user_agent = get_header_string("user-agent", request.headers());
// determine the remote IP from headers if they exist
let header_ip = settings
.network
.remote_ip_header
.as_ref()
.and_then(|x| get_header_string(x, request.headers()));
// use the socket addr as a backup
let remote_ip =
header_ip.unwrap_or_else(|| remote_addr.ip().to_string());
let client_info = ClientInfo {
remote_ip,
user_agent,
};
// spawn a nostr server with our websocket
tokio::spawn(nostr_server(
pool,
client_info,
settings,
ws_stream,
broadcast,
event_tx,
shutdown,
));
}
// todo: trace, don't print...
Err(e) => println!(
"error when trying to upgrade connection \
from address {} to websocket connection. \
Error is: {}",
remote_addr, e
),
}
});
//return the response to the handshake request
response
}
Err(error) => {
warn!("websocket response failed");
let mut res =
Response::new(Body::from(format!("Failed to create websocket: {}", error)));
*res.status_mut() = StatusCode::BAD_REQUEST;
return Ok(res);
}
};
Ok::<_, Infallible>(response)
}
// Request for Relay info
("/", false) => {
// handle request at root with no upgrade header
// Check if this is a nostr server info request
let accept_header = &request.headers().get(ACCEPT);
// check if application/nostr+json is included
if let Some(media_types) = accept_header {
if let Ok(mt_str) = media_types.to_str() {
if mt_str.contains("application/nostr+json") {
// build a relay info response
debug!("Responding to server info request");
let rinfo = RelayInfo::from(settings.info);
let b = Body::from(serde_json::to_string_pretty(&rinfo).unwrap());
return Ok(Response::builder()
.status(200)
.header("Content-Type", "application/nostr+json")
.header("Access-Control-Allow-Origin", "*")
.body(b)
.unwrap());
}
}
}
Ok(Response::builder()
.status(200)
.header("Content-Type", "text/plain")
.body(Body::from("Please use a Nostr client to connect."))
.unwrap())
}
(_, _) => {
//handle any other url
Ok(Response::builder()
.status(StatusCode::NOT_FOUND)
.body(Body::from("Nothing here."))
.unwrap())
}
}
}
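// Example (added for illustration; host and port are hypothetical, use your
// configured network address): the NIP-11 relay information document served
// by the ("/", false) branch above can be fetched with:
//   curl -H 'Accept: application/nostr+json' http://localhost:8080/
// while a plain GET without that Accept header returns the
// "Please use a Nostr client to connect." text response.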
fn get_header_string(header: &str, headers: &HeaderMap) -> Option<String> {
headers
.get(header)
.and_then(|x| x.to_str().ok().map(|x| x.to_string()))
}
// return on a control-c or internally requested shutdown signal
async fn ctrl_c_or_signal(mut shutdown_signal: Receiver<()>) {
let mut term_signal = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
.expect("could not define signal");
loop {
tokio::select! {
_ = shutdown_signal.recv() => {
info!("Shutting down webserver as requested");
// server shutting down, exit loop
break;
},
_ = tokio::signal::ctrl_c() => {
info!("Shutting down webserver due to SIGINT");
break;
},
_ = term_signal.recv() => {
info!("Shutting down webserver due to SIGTERM");
break;
},
}
}
}
/// Start running a Nostr relay server.
pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result<(), Error> {
trace!("Config: {:?}", settings);
// do some config validation.
if !Path::new(&settings.database.data_directory).is_dir() {
error!("Database directory does not exist");
return Err(Error::DatabaseDirError);
}
let addr = format!(
"{}:{}",
settings.network.address.trim(),
settings.network.port
);
let socket_addr = addr.parse().expect("listening address not valid");
// address whitelisting settings
if let Some(addr_whitelist) = &settings.authorization.pubkey_whitelist {
info!(
"Event publishing restricted to {} pubkey(s)",
addr_whitelist.len()
);
}
// check if NIP-05 enforced user verification is on
if settings.verified_users.is_active() {
info!(
"NIP-05 user verification mode:{:?}",
settings.verified_users.mode
);
if let Some(d) = settings.verified_users.verify_update_duration() {
info!("NIP-05 check user verification every: {:?}", d);
}
if let Some(d) = settings.verified_users.verify_expiration_duration() {
info!("NIP-05 user verification expires after: {:?}", d);
}
if let Some(wl) = &settings.verified_users.domain_whitelist {
info!("NIP-05 domain whitelist: {:?}", wl);
}
if let Some(bl) = &settings.verified_users.domain_blacklist {
info!("NIP-05 domain blacklist: {:?}", bl);
}
}
// configure tokio runtime
let rt = Builder::new_multi_thread()
.enable_all()
.thread_name("tokio-ws")
.build()
.unwrap();
// start tokio
rt.block_on(async {
let broadcast_buffer_limit = settings.limits.broadcast_buffer;
let persist_buffer_limit = settings.limits.event_persist_buffer;
let verified_users_active = settings.verified_users.is_active();
let db_min_conn = settings.database.min_conn;
let db_max_conn = settings.database.max_conn;
let settings = settings.clone();
info!("listening on: {}", socket_addr);
// all client-submitted valid events are broadcast to every
// other client on this channel. This should be large enough
// to accommodate slower readers (messages are dropped if
// clients can not keep up).
let (bcast_tx, _) = broadcast::channel::<Event>(broadcast_buffer_limit);
// validated events that need to be persisted are sent to the
// database via this channel.
let (event_tx, event_rx) = mpsc::channel::<SubmittedEvent>(persist_buffer_limit);
// establish a channel for letting all threads know about a
// requested server shutdown.
let (invoke_shutdown, shutdown_listen) = broadcast::channel::<()>(1);
// create a channel for sending any new metadata event. These
// will get processed relatively slowly (a potentially
// multi-second blocking HTTP call) on a single thread, so we
// buffer requests on the channel. No harm in dropping events
// here, since we are protecting against DoS. This can make
// it difficult to set up initial metadata in bulk, since
// overwhelming this will drop events and won't register
// metadata events.
let (metadata_tx, metadata_rx) = broadcast::channel::<Event>(4096);
// start the database writer thread. Give it a channel for
// writing events, and for publishing events that have been
// written (to all connected clients).
db::db_writer(
settings.clone(),
event_rx,
bcast_tx.clone(),
metadata_tx.clone(),
shutdown_listen,
)
.await;
info!("db writer created");
// create a nip-05 verifier thread; if enabled.
if settings.verified_users.mode != VerifiedUsersMode::Disabled {
let verifier_opt =
nip05::Verifier::new(metadata_rx, bcast_tx.clone(), settings.clone());
if let Ok(mut v) = verifier_opt {
if verified_users_active {
tokio::task::spawn(async move {
info!("starting up NIP-05 verifier...");
v.run().await;
});
}
}
}
// listen for (external to tokio) shutdown request
let controlled_shutdown = invoke_shutdown.clone();
tokio::spawn(async move {
info!("control message listener started");
match shutdown_rx.recv() {
Ok(()) => {
info!("control message requesting shutdown");
controlled_shutdown.send(()).ok();
}
Err(std::sync::mpsc::RecvError) => {
// FIXME: spurious error on startup?
debug!("shutdown requestor is disconnected");
}
};
});
// listen for ctrl-c interrupts
let ctrl_c_shutdown = invoke_shutdown.clone();
// listener for webserver shutdown
let webserver_shutdown_listen = invoke_shutdown.subscribe();
tokio::spawn(async move {
tokio::signal::ctrl_c().await.unwrap();
info!("shutting down due to SIGINT (main)");
ctrl_c_shutdown.send(()).ok();
});
// build a connection pool for sqlite connections
let pool = db::build_pool(
"client query",
&settings,
rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY
| rusqlite::OpenFlags::SQLITE_OPEN_SHARED_CACHE,
db_min_conn,
db_max_conn,
true,
);
// A `Service` is needed for every connection, so this
// creates one from our `handle_request` function.
let make_svc = make_service_fn(|conn: &AddrStream| {
let svc_pool = pool.clone();
let remote_addr = conn.remote_addr();
let bcast = bcast_tx.clone();
let event = event_tx.clone();
let stop = invoke_shutdown.clone();
let settings = settings.clone();
async move {
// service_fn converts our function into a `Service`
Ok::<_, Infallible>(service_fn(move |request: Request<Body>| {
handle_web_request(
request,
svc_pool.clone(),
settings.clone(),
remote_addr,
bcast.clone(),
event.clone(),
stop.subscribe(),
)
}))
}
});
let server = Server::bind(&socket_addr)
.serve(make_svc)
.with_graceful_shutdown(ctrl_c_or_signal(webserver_shutdown_listen));
// run hyper in this thread. This is why the thread does not return.
if let Err(e) = server.await {
eprintln!("server error: {}", e);
}
});
Ok(())
}
/// Nostr protocol messages from a client
#[derive(Deserialize, Serialize, Clone, PartialEq, Eq, Debug)]
#[serde(untagged)]
pub enum NostrMessage {
/// An `EVENT` message
EventMsg(EventCmd),
/// A `REQ` message
SubMsg(Subscription),
/// A `CLOSE` message
CloseMsg(CloseCmd),
}
/// Convert Message to NostrMessage
fn convert_to_msg(msg: String, max_bytes: Option<usize>) -> Result<NostrMessage> {
let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
match parsed_res {
Ok(m) => {
if let NostrMessage::EventMsg(_) = m {
if let Some(max_size) = max_bytes {
// check length, ensure that some max size is set.
if msg.len() > max_size && max_size > 0 {
return Err(Error::EventMaxLengthError(msg.len()));
}
}
}
Ok(m)
}
Err(e) => {
debug!("proto parse error: {:?}", e);
debug!("parse error on message: {}", msg.trim());
Err(Error::ProtoParseError)
}
}
}
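// Behavior sketch (added note): only EVENT messages are size-limited here.
// With, e.g., settings.limits.max_event_bytes = Some(131072), an EVENT
// payload of 200_000 bytes yields Err(Error::EventMaxLengthError(200000)),
// while REQ and CLOSE messages are not size-checked by this function.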
/// Turn a string into a NOTICE message ready to send over a WebSocket
fn make_notice_message(notice: Notice) -> Message {
let json = match notice {
Notice::Message(ref msg) => json!(["NOTICE", msg]),
Notice::EventResult(ref res) => json!(["OK", res.id, res.status.to_bool(), res.msg]),
};
Message::text(json.to_string())
}
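// Example outputs (added for illustration):
//   make_notice_message(Notice::message("too many subs".into()))
//     => ["NOTICE","too many subs"]
//   make_notice_message(Notice::saved("abcd".into()))
//     => ["OK","abcd",true,""]
//   make_notice_message(Notice::invalid("abcd".into(), "bad signature"))
//     => ["OK","abcd",false,"invalid: bad signature"]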
struct ClientInfo {
remote_ip: String,
user_agent: Option<String>,
}
/// Handle new client connections. This runs through an event loop
/// for all client communication.
async fn nostr_server(
pool: db::SqlitePool,
client_info: ClientInfo,
settings: Settings,
mut ws_stream: WebSocketStream<Upgraded>,
broadcast: Sender<Event>,
event_tx: mpsc::Sender<SubmittedEvent>,
mut shutdown: Receiver<()>,
) {
// the time this websocket nostr server started
let orig_start = Instant::now();
// get a broadcast channel for clients to communicate on
let mut bcast_rx = broadcast.subscribe();
// Track internal client state
let mut conn = conn::ClientConn::new(client_info.remote_ip);
// Use the connection's short client prefix as an identifier in logs
let cid = conn.get_client_prefix();
// Create a channel for receiving query results from the database.
// we will send out the tx handle to any query we generate.
let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(256);
// Create channel for receiving NOTICEs
let (notice_tx, mut notice_rx) = mpsc::channel::<Notice>(32);
// last time this client sent data (message, ping, etc.)
let mut last_message_time = Instant::now();
// ping interval (every 5 minutes)
let default_ping_dur = Duration::from_secs(settings.network.ping_interval_seconds.into());
// disconnect after 20 minutes without a ping response or event.
let max_quiet_time = Duration::from_secs(60 * 20);
let start = tokio::time::Instant::now() + default_ping_dur;
let mut ping_interval = tokio::time::interval_at(start, default_ping_dur);
// maintain a hashmap of a oneshot channel for active subscriptions.
// when these subscriptions are cancelled, make a message
// available to the executing query so it knows to stop.
let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
// keep track of the subscriptions we have
let mut current_subs: Vec<Subscription> = Vec::new();
// for stats, keep track of how many events the client published,
// and how many it received from queries.
let mut client_published_event_count: usize = 0;
let mut client_received_event_count: usize = 0;
debug!("new client connection (cid: {}, ip: {:?})", cid, conn.ip());
if let Some(ua) = client_info.user_agent {
debug!("cid: {}, user-agent: {:?}", cid, ua);
}
loop {
tokio::select! {
_ = shutdown.recv() => {
info!("Close connection down due to shutdown, client: {}, ip: {:?}, connected: {:?}", cid, conn.ip(), orig_start.elapsed());
// server shutting down, exit loop
break;
},
_ = ping_interval.tick() => {
// check how long since we talked to client
// if it has been too long, disconnect
if last_message_time.elapsed() > max_quiet_time {
debug!("ending connection due to lack of client ping response");
break;
}
// Send a ping
ws_stream.send(Message::Ping(Vec::new())).await.ok();
},
Some(notice_msg) = notice_rx.recv() => {
ws_stream.send(make_notice_message(notice_msg)).await.ok();
},
Some(query_result) = query_rx.recv() => {
// database informed us of a query result we asked for
let subesc = query_result.sub_id.replace('"', "");
if query_result.event == "EOSE" {
let send_str = format!("[\"EOSE\",\"{}\"]", subesc);
ws_stream.send(Message::Text(send_str)).await.ok();
} else {
client_received_event_count += 1;
// send a result
let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
ws_stream.send(Message::Text(send_str)).await.ok();
}
},
// TODO: consider logging the LaggedRecv error
Ok(global_event) = bcast_rx.recv() => {
// an event has been broadcast to all clients
// first check if there is a subscription for this event.
for (s, sub) in conn.subscriptions() {
if !sub.interested_in_event(&global_event) {
continue;
}
// TODO: serialize at broadcast time, instead of
// once for each consumer.
if let Ok(event_str) = serde_json::to_string(&global_event) {
debug!("sub match for client: {}, sub: {:?}, event: {:?}",
cid, s,
global_event.get_event_id_prefix());
// create an event response and send it
let subesc = s.replace('"', "");
ws_stream.send(Message::Text(format!("[\"EVENT\",\"{}\",{}]", subesc, event_str))).await.ok();
} else {
warn!("could not serialize event: {:?}", global_event.get_event_id_prefix());
}
}
},
ws_next = ws_stream.next() => {
// update most recent message time for client
last_message_time = Instant::now();
// Consume text messages from the client, parse into Nostr messages.
let nostr_msg = match ws_next {
Some(Ok(Message::Text(m))) => {
convert_to_msg(m,settings.limits.max_event_bytes)
},
Some(Ok(Message::Binary(_))) => {
ws_stream.send(
make_notice_message(Notice::message("binary messages are not accepted".into()))).await.ok();
continue;
},
Some(Ok(Message::Ping(_) | Message::Pong(_))) => {
// get a ping/pong, ignore. tungstenite will
// send responses automatically.
continue;
},
Some(Err(WsError::Capacity(MessageTooLong{size, max_size}))) => {
ws_stream.send(
make_notice_message(Notice::message(format!("message too large ({} > {})",size, max_size)))).await.ok();
continue;
},
None |
Some(Ok(Message::Close(_)) |
Err(WsError::AlreadyClosed | WsError::ConnectionClosed |
WsError::Protocol(tungstenite::error::ProtocolError::ResetWithoutClosingHandshake)))
=> {
debug!("websocket close from client (cid: {}, ip: {:?})",cid, conn.ip());
break;
},
Some(Err(WsError::Io(e))) => {
// IO errors are considered fatal
warn!("IO error (cid: {}, ip: {:?}): {:?}", cid, conn.ip(), e);
break;
}
x => {
// default condition on error is to close the client connection
info!("unknown error (cid: {}, ip: {:?}): {:?} (closing conn)", cid, conn.ip(), x);
break;
}
};
// process the parsed Nostr message
match nostr_msg {
Ok(NostrMessage::EventMsg(ec)) => {
// An EventCmd needs to be validated to be converted into an Event
// handle each type of message
let evid = ec.event_id().to_owned();
let parsed : Result<Event> = Result::<Event>::from(ec);
match parsed {
Ok(e) => {
let id_prefix:String = e.id.chars().take(8).collect();
debug!("successfully parsed/validated event: {:?} (cid: {})", id_prefix, cid);
// check if the event is too far in the future.
if e.is_valid_timestamp(settings.options.reject_future_seconds) {
// Write this to the database.
let submit_event = SubmittedEvent { event: e.clone(), notice_tx: notice_tx.clone() };
event_tx.send(submit_event).await.ok();
client_published_event_count += 1;
} else {
info!("client: {} sent a far future-dated event", cid);
if let Some(fut_sec) = settings.options.reject_future_seconds {
let msg = format!("The event created_at field is out of the acceptable range (+{}sec) for this relay.",fut_sec);
let notice = Notice::invalid(e.id, &msg);
ws_stream.send(make_notice_message(notice)).await.ok();
}
}
},
Err(e) => {
info!("client sent an invalid event (cid: {})", cid);
ws_stream.send(make_notice_message(Notice::invalid(evid, &format!("{}", e)))).await.ok();
}
}
},
Ok(NostrMessage::SubMsg(s)) => {
debug!("subscription requested (cid: {}, sub: {:?})", cid, s.id);
// subscription handling consists of:
// * registering the subscription so future events can be matched
// * making a channel to cancel to request later
// * sending a request for a SQL query
// Do nothing if the sub already exists.
if !current_subs.contains(&s) {
current_subs.push(s.clone());
let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
match conn.subscribe(s.clone()) {
Ok(()) => {
// when we insert, if there was a previous query running with the same name, cancel it.
if let Some(previous_query) = running_queries.insert(s.id.to_owned(), abandon_query_tx) {
previous_query.send(()).ok();
}
// start a database query
db::db_query(s, cid.to_owned(), pool.clone(), query_tx.clone(), abandon_query_rx).await;
},
Err(e) => {
info!("Subscription error: {}", e);
ws_stream.send(make_notice_message(Notice::message(format!("Subscription error: {}", e)))).await.ok();
}
}
} else {
info!("client send duplicate subscription, ignoring (cid: {}, sub: {:?})", cid, s.id);
}
},
Ok(NostrMessage::CloseMsg(cc)) => {
// closing a request simply removes the subscription.
let parsed : Result<Close> = Result::<Close>::from(cc);
if let Ok(c) = parsed {
// remove from the list of known subs
if let Some(pos) = current_subs.iter().position(|s| *s.id == c.id) {
current_subs.remove(pos);
}
// check if a query is currently
// running, and remove it if so.
let stop_tx = running_queries.remove(&c.id);
if let Some(tx) = stop_tx {
tx.send(()).ok();
}
// stop checking new events against
// the subscription
conn.unsubscribe(&c);
} else {
info!("invalid command ignored");
ws_stream.send(make_notice_message(Notice::message("could not parse command".into()))).await.ok();
}
},
Err(Error::ConnError) => {
debug!("got connection close/error, disconnecting cid: {}, ip: {:?}",cid, conn.ip());
break;
}
Err(Error::EventMaxLengthError(s)) => {
info!("client sent event larger ({} bytes) than max size (cid: {})", s, cid);
ws_stream.send(make_notice_message(Notice::message("event exceeded max size".into()))).await.ok();
},
Err(Error::ProtoParseError) => {
info!("client sent event that could not be parsed (cid: {})", cid);
ws_stream.send(make_notice_message(Notice::message("could not parse command".into()))).await.ok();
},
Err(e) => {
info!("got non-fatal error from client (cid: {}, error: {:?}", cid, e);
},
}
},
}
}
// connection cleanup - ensure any still running queries are terminated.
for (_, stop_tx) in running_queries {
stop_tx.send(()).ok();
}
info!(
"stopping client connection (cid: {}, ip: {:?}, sent: {} events, recv: {} events, connected: {:?})",
cid,
conn.ip(),
client_published_event_count,
client_received_event_count,
orig_start.elapsed()
);
}

@@ -1,10 +1,14 @@
 //! Subscription and filter parsing
 use crate::error::Result;
 use crate::event::Event;
+use serde::de::Unexpected;
 use serde::{Deserialize, Deserializer, Serialize};
+use serde_json::Value;
+use std::collections::HashMap;
+use std::collections::HashSet;
 /// Subscription identifier and set of request filters
-#[derive(Serialize, PartialEq, Debug, Clone)]
+#[derive(Serialize, PartialEq, Eq, Debug, Clone)]
 pub struct Subscription {
     pub id: String,
     pub filters: Vec<ReqFilter>,
@@ -15,24 +19,111 @@ pub struct Subscription {
 /// Corresponds to client-provided subscription request elements. Any
 /// element can be present if it should be used in filtering, or
 /// absent ([`None`]) if it should be ignored.
-#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
+#[derive(Serialize, PartialEq, Eq, Debug, Clone)]
 pub struct ReqFilter {
-    /// Event hash
-    pub id: Option<String>,
-    /// Event kind
-    pub kind: Option<u64>,
-    /// Referenced event hash
-    #[serde(rename = "#e")]
-    pub event: Option<String>,
-    /// Referenced public key for a petname
-    #[serde(rename = "#p")]
-    pub pubkey: Option<String>,
+    /// Event hashes
+    pub ids: Option<Vec<String>>,
+    /// Event kinds
+    pub kinds: Option<Vec<u64>>,
     /// Events published after this time
     pub since: Option<u64>,
     /// Events published before this time
     pub until: Option<u64>,
     /// List of author public keys
     pub authors: Option<Vec<String>>,
+    /// Limit number of results
+    pub limit: Option<u64>,
+    /// Set of tags
+    #[serde(skip)]
+    pub tags: Option<HashMap<char, HashSet<String>>>,
+    /// Force no matches due to malformed data
+    // we can't represent it in the req filter, so we don't want to
+    // erroneously match.  This basically indicates the req tried to
+    // do something invalid.
+    pub force_no_match: bool,
 }
+impl<'de> Deserialize<'de> for ReqFilter {
+    fn deserialize<D>(deserializer: D) -> Result<ReqFilter, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let received: Value = Deserialize::deserialize(deserializer)?;
+        let filter = received.as_object().ok_or_else(|| {
+            serde::de::Error::invalid_type(
+                Unexpected::Other("reqfilter is not an object"),
+                &"a json object",
+            )
+        })?;
+        let mut rf = ReqFilter {
+            ids: None,
+            kinds: None,
+            since: None,
+            until: None,
+            authors: None,
+            limit: None,
+            tags: None,
+            force_no_match: false,
+        };
+        let mut ts = None;
+        // iterate through each key, and assign values that exist
+        for (key, val) in filter.into_iter() {
+            // ids
+            if key == "ids" {
+                rf.ids = Deserialize::deserialize(val).ok();
+            } else if key == "kinds" {
+                rf.kinds = Deserialize::deserialize(val).ok();
+            } else if key == "since" {
+                rf.since = Deserialize::deserialize(val).ok();
+            } else if key == "until" {
+                rf.until = Deserialize::deserialize(val).ok();
+            } else if key == "limit" {
+                rf.limit = Deserialize::deserialize(val).ok();
+            } else if key == "authors" {
+                rf.authors = Deserialize::deserialize(val).ok();
+            } else if key.starts_with('#') && key.len() > 1 && val.is_array() {
+                if let Some(tag_search) = tag_search_char_from_filter(key) {
+                    if ts.is_none() {
+                        // Initialize the tag map if necessary
+                        ts = Some(HashMap::new());
+                    }
+                    if let Some(m) = ts.as_mut() {
+                        let tag_vals: Option<Vec<String>> = Deserialize::deserialize(val).ok();
+                        if let Some(v) = tag_vals {
+                            let hs = HashSet::from_iter(v.into_iter());
+                            m.insert(tag_search.to_owned(), hs);
+                        }
+                    };
+                } else {
+                    // tag search that is multi-character, don't add to subscription
+                    rf.force_no_match = true;
+                    continue;
+                }
+            }
+        }
+        rf.tags = ts;
+        Ok(rf)
+    }
+}
+/// Attempt to form a single-char identifier from a tag search filter
+fn tag_search_char_from_filter(tagname: &str) -> Option<char> {
+    let tagname_nohash = &tagname[1..];
+    // We return the tag character if and only if the tagname consists
+    // of a single char.
+    let mut tagnamechars = tagname_nohash.chars();
+    let firstchar = tagnamechars.next();
+    match firstchar {
+        Some(_) => {
+            // check second char
+            if tagnamechars.next().is_none() {
+                firstchar
+            } else {
+                None
+            }
+        }
+        None => None,
+    }
+}
 impl<'de> Deserialize<'de> for Subscription {
@@ -42,7 +133,7 @@ impl<'de> Deserialize<'de> for Subscription {
     where
         D: Deserializer<'de>,
     {
-        let mut v: serde_json::Value = Deserialize::deserialize(deserializer)?;
+        let mut v: Value = Deserialize::deserialize(deserializer)?;
         // this should be a 3-or-more element array.
         // verify the first element is a String, REQ
         // get the subscription from the second element.
@@ -77,6 +168,7 @@ impl<'de> Deserialize<'de> for Subscription {
         for fv in i {
             let f: ReqFilter = serde_json::from_value(fv.take())
                 .map_err(|_| serde::de::Error::custom("could not parse filter"))?;
+            // create indexes
             filters.push(f);
         }
         Ok(Subscription {
@@ -103,46 +195,76 @@ impl Subscription {
     }
 }
-impl ReqFilter {
-    /// Check for a match within the authors list.
-    // TODO: Ambiguity; what if the array is empty?  Should we
-    // consider that the same as null?
-    fn authors_match(&self, event: &Event) -> bool {
-        self.authors
-            .as_ref()
-            .map(|vs| vs.contains(&event.pubkey.to_owned()))
-            .unwrap_or(true)
-    }
-    /// Check if this filter either matches, or does not care about the event tags.
-    fn event_match(&self, event: &Event) -> bool {
-        self.event
-            .as_ref()
-            .map(|t| event.event_tag_match(t))
-            .unwrap_or(true)
-    }
-    /// Check if this filter either matches, or does not care about
-    /// the pubkey/petname tags.
-    fn pubkey_match(&self, event: &Event) -> bool {
-        self.pubkey
-            .as_ref()
-            .map(|t| event.pubkey_tag_match(t))
-            .unwrap_or(true)
-    }
+fn prefix_match(prefixes: &[String], target: &str) -> bool {
+    for prefix in prefixes {
+        if target.starts_with(prefix) {
+            return true;
+        }
+    }
+    // none matched
+    false
+}
+impl ReqFilter {
+    fn ids_match(&self, event: &Event) -> bool {
+        self.ids
+            .as_ref()
+            .map(|vs| prefix_match(vs, &event.id))
+            .unwrap_or(true)
+    }
+    fn authors_match(&self, event: &Event) -> bool {
+        self.authors
+            .as_ref()
+            .map(|vs| prefix_match(vs, &event.pubkey))
+            .unwrap_or(true)
+    }
+    fn delegated_authors_match(&self, event: &Event) -> bool {
+        if let Some(delegated_pubkey) = &event.delegated_by {
+            self.authors
+                .as_ref()
+                .map(|vs| prefix_match(vs, delegated_pubkey))
+                .unwrap_or(true)
+        } else {
+            false
+        }
+    }
+    fn tag_match(&self, event: &Event) -> bool {
+        // get the hashset from the filter.
+        if let Some(map) = &self.tags {
+            for (key, val) in map.iter() {
+                let tag_match = event.generic_tag_val_intersect(*key, val);
+                // if there is no match for this tag, the match fails.
+                if !tag_match {
+                    return false;
+                }
+                // if there was a match, we move on to the next one.
+            }
+        }
+        // if the tag map is empty, the match succeeds (there was no filter)
+        true
+    }
     /// Check if this filter either matches, or does not care about the kind.
     fn kind_match(&self, kind: u64) -> bool {
-        self.kind.map(|v| v == kind).unwrap_or(true)
+        self.kinds
+            .as_ref()
+            .map(|ks| ks.contains(&kind))
+            .unwrap_or(true)
     }
     /// Determine if all populated fields in this filter match the provided event.
     pub fn interested_in_event(&self, event: &Event) -> bool {
-        self.id.as_ref().map(|v| v == &event.id).unwrap_or(true)
+        // self.id.as_ref().map(|v| v == &event.id).unwrap_or(true)
+        self.ids_match(event)
             && self.since.map(|t| event.created_at > t).unwrap_or(true)
+            && self.until.map(|t| event.created_at < t).unwrap_or(true)
             && self.kind_match(event.kind)
-            && self.authors_match(event)
-            && self.pubkey_match(event)
-            && self.event_match(event)
+            && (self.authors_match(event) || self.delegated_authors_match(event))
+            && self.tag_match(event)
+            && !self.force_no_match
     }
 }
@@ -173,54 +295,146 @@ mod tests {
     }
     #[test]
-    fn invalid_filter() {
-        // unrecognized field in filter
-        let raw_json = "[\"REQ\",\"some-id\",{\"foo\": 3}]";
-        assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
+    fn legacy_filter() {
+        // legacy field in filter
+        let raw_json = "[\"REQ\",\"some-id\",{\"kind\": 3}]";
+        assert!(serde_json::from_str::<Subscription>(raw_json).is_ok());
     }
     #[test]
     fn author_filter() -> Result<()> {
-        let raw_json = "[\"REQ\",\"some-id\",{\"author\": \"test-author-id\"}]";
+        let raw_json = r#"["REQ","some-id",{"authors": ["test-author-id"]}]"#;
         let s: Subscription = serde_json::from_str(raw_json)?;
         assert_eq!(s.id, "some-id");
         assert_eq!(s.filters.len(), 1);
         let first_filter = s.filters.get(0).unwrap();
-        assert_eq!(first_filter.author, Some("test-author-id".to_owned()));
+        assert_eq!(
+            first_filter.authors,
+            Some(vec!("test-author-id".to_owned()))
+        );
+        Ok(())
+    }
+    #[test]
+    fn interest_author_prefix_match() -> Result<()> {
+        // subscription with a filter for an author prefix
+        let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"authors": ["abc"]}]"#)?;
+        let e = Event {
+            id: "foo".to_owned(),
+            pubkey: "abcd".to_owned(),
+            delegated_by: None,
+            created_at: 0,
+            kind: 0,
+            tags: Vec::new(),
+            content: "".to_owned(),
+            sig: "".to_owned(),
+            tagidx: None,
+        };
+        assert!(s.interested_in_event(&e));
+        Ok(())
+    }
+    #[test]
+    fn interest_id_prefix_match() -> Result<()> {
+        // subscription with a filter for an ID prefix
+        let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"]}]"#)?;
+        let e = Event {
+            id: "abcd".to_owned(),
+            pubkey: "".to_owned(),
+            delegated_by: None,
+            created_at: 0,
+            kind: 0,
+            tags: Vec::new(),
+            content: "".to_owned(),
+            sig: "".to_owned(),
+            tagidx: None,
+        };
+        assert!(s.interested_in_event(&e));
         Ok(())
     }
     #[test]
     fn interest_id_nomatch() -> Result<()> {
         // subscription with a filter for ID
-        let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"id":"abc"}]"#)?;
+        let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"ids": ["xyz"]}]"#)?;
         let e = Event {
             id: "abcde".to_owned(),
             pubkey: "".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),
             content: "".to_owned(),
             sig: "".to_owned(),
+            tagidx: None,
         };
-        assert_eq!(s.interested_in_event(&e), false);
+        assert!(!s.interested_in_event(&e));
+        Ok(())
+    }
+    #[test]
+    fn interest_until() -> Result<()> {
+        // subscription with a filter for ID and time
+        let s: Subscription =
+            serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "until": 1000}]"#)?;
+        let e = Event {
+            id: "abc".to_owned(),
+            pubkey: "".to_owned(),
+            delegated_by: None,
+            created_at: 50,
+            kind: 0,
+            tags: Vec::new(),
+            content: "".to_owned(),
+            sig: "".to_owned(),
+            tagidx: None,
+        };
+        assert!(s.interested_in_event(&e));
+        Ok(())
+    }
+    #[test]
+    fn interest_range() -> Result<()> {
+        // subscription with a filter for ID and a time range
+        let s_in: Subscription =
+            serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 100, "until": 200}]"#)?;
+        let s_before: Subscription =
+            serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 100, "until": 140}]"#)?;
+        let s_after: Subscription =
+            serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 160, "until": 200}]"#)?;
+        let e = Event {
+            id: "abc".to_owned(),
+            pubkey: "".to_owned(),
+            delegated_by: None,
+            created_at: 150,
+            kind: 0,
+            tags: Vec::new(),
+            content: "".to_owned(),
+            sig: "".to_owned(),
+            tagidx: None,
+        };
+        assert!(s_in.interested_in_event(&e));
+        assert!(!s_before.interested_in_event(&e));
+        assert!(!s_after.interested_in_event(&e));
         Ok(())
     }
     #[test]
     fn interest_time_and_id() -> Result<()> {
         // subscription with a filter for ID and time
-        let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"id":"abc", "since": 1000}]"#)?;
+        let s: Subscription =
+            serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 1000}]"#)?;
         let e = Event {
             id: "abc".to_owned(),
             pubkey: "".to_owned(),
+            delegated_by: None,
             created_at: 50,
             kind: 0,
             tags: Vec::new(),
             content: "".to_owned(),
             sig: "".to_owned(),
+            tagidx: None,
         };
-        assert_eq!(s.interested_in_event(&e), false);
+        assert!(!s.interested_in_event(&e));
         Ok(())
     }
@@ -231,13 +445,15 @@ mod tests {
         let e = Event {
             id: "abc".to_owned(),
             pubkey: "".to_owned(),
+            delegated_by: None,
             created_at: 1001,
             kind: 0,
             tags: Vec::new(),
             content: "".to_owned(),
             sig: "".to_owned(),
+            tagidx: None,
        };
-        assert_eq!(s.interested_in_event(&e), true);
+        assert!(s.interested_in_event(&e));
         Ok(())
     }
@@ -248,13 +464,15 @@ mod tests {
         let e = Event {
             id: "abc".to_owned(),
             pubkey: "".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),
             content: "".to_owned(),
             sig: "".to_owned(),
+            tagidx: None,
        };
-        assert_eq!(s.interested_in_event(&e), true);
+        assert!(s.interested_in_event(&e));
         Ok(())
     }
@@ -265,30 +483,34 @@ mod tests {
         let e = Event {
             id: "123".to_owned(),
             pubkey: "abc".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),
             content: "".to_owned(),
             sig: "".to_owned(),
+            tagidx: None,
        };
-        assert_eq!(s.interested_in_event(&e), true);
+        assert!(s.interested_in_event(&e));
         Ok(())
     }
     #[test]
     fn authors_multi_pubkey() -> Result<()> {
         // check for any of a set of authors, against the pubkey
         let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"authors":["abc", "bcd"]}]"#)?;
         let e = Event {
             id: "123".to_owned(),
             pubkey: "bcd".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),
             content: "".to_owned(),
             sig: "".to_owned(),
+            tagidx: None,
        };
-        assert_eq!(s.interested_in_event(&e), true);
+        assert!(s.interested_in_event(&e));
         Ok(())
     }
@@ -299,13 +521,15 @@ mod tests {
         let e = Event {
             id: "123".to_owned(),
             pubkey: "xyz".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),
             content: "".to_owned(),
             sig: "".to_owned(),
+            tagidx: None,
        };
-        assert_eq!(s.interested_in_event(&e), false);
+        assert!(!s.interested_in_event(&e));
         Ok(())
     }
 }
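Worth noting for readers of this diff: with the new `prefix_match` helper, entries in `ids` and `authors` behave as hex prefixes rather than exact values, which is what the `interest_author_prefix_match` test exercises. A standalone sketch of the same comparison logic, reimplemented here so it runs without the relay's types:

/// Mirrors the relay's `prefix_match`: true if `target` starts with
/// any of the given prefixes.
fn prefix_match(prefixes: &[String], target: &str) -> bool {
    prefixes.iter().any(|p| target.starts_with(p))
}

fn main() {
    let authors = vec!["abc".to_owned()];
    // a filter like {"authors": ["abc"]} matches the pubkey "abcd"...
    assert!(prefix_match(&authors, "abcd"));
    // ...but not a pubkey that merely contains "abc" later on
    assert!(!prefix_match(&authors, "xabc"));
    // an empty prefix list matches nothing
    assert!(!prefix_match(&[], "abcd"));
}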

src/utils.rs Normal file (+33)

@@ -0,0 +1,33 @@
//! Common utility functions
use std::time::SystemTime;
/// Seconds since 1970.
pub fn unix_time() -> u64 {
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.map(|x| x.as_secs())
.unwrap_or(0)
}
/// Check if a string contains only hex characters.
pub fn is_hex(s: &str) -> bool {
s.chars().all(|x| char::is_ascii_hexdigit(&x))
}
/// Check if a string contains only lower-case hex chars.
pub fn is_lower_hex(s: &str) -> bool {
s.chars().all(|x| {
(char::is_ascii_lowercase(&x) || char::is_ascii_digit(&x)) && char::is_ascii_hexdigit(&x)
})
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn lower_hex() {
let hexstr = "abcd0123";
assert!(is_lower_hex(hexstr));
}
}
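One subtlety in these helpers: `is_hex` accepts any ASCII hex digit regardless of case, while `is_lower_hex` additionally rejects uppercase; both vacuously accept the empty string. A quick standalone illustration, copying the two predicates verbatim:

fn is_hex(s: &str) -> bool {
    s.chars().all(|x| char::is_ascii_hexdigit(&x))
}

fn is_lower_hex(s: &str) -> bool {
    s.chars().all(|x| {
        (char::is_ascii_lowercase(&x) || char::is_ascii_digit(&x)) && char::is_ascii_hexdigit(&x)
    })
}

fn main() {
    assert!(is_hex("ABCD0123"));        // mixed/upper case is still hex
    assert!(!is_lower_hex("ABCD0123")); // but it is not lower-case hex
    assert!(is_lower_hex("abcd0123"));
    // both reject non-hex input, e.g. the 'x' here:
    assert!(!is_hex("0x01"));
}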

tests/common/mod.rs Normal file (+110)

@@ -0,0 +1,110 @@
use anyhow::{anyhow, Result};
use nostr_rs_relay::config;
use nostr_rs_relay::server::start_server;
//use http::{Request, Response};
use hyper::{Client, StatusCode, Uri};
use std::net::TcpListener;
use std::sync::atomic::{AtomicU16, Ordering};
use std::sync::mpsc as syncmpsc;
use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
use std::thread;
use std::thread::JoinHandle;
use std::time::Duration;
use tracing::{debug, info};
pub struct Relay {
pub port: u16,
pub handle: JoinHandle<()>,
pub shutdown_tx: MpscSender<()>,
}
pub fn start_relay() -> Result<Relay> {
// setup tracing
let _trace_sub = tracing_subscriber::fmt::try_init();
info!("Starting a new relay");
// replace default settings
let mut settings = config::Settings::default();
// identify open port
info!("Checking for address...");
let port = get_available_port().unwrap();
info!("Found open port: {}", port);
// bind to local interface only
settings.network.address = "127.0.0.1".to_owned();
settings.network.port = port;
// create an in-memory DB with multiple readers
settings.database.in_memory = true;
settings.database.min_conn = 4;
settings.database.max_conn = 8;
let (shutdown_tx, shutdown_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
let handle = thread::spawn(|| {
// server will block the thread it is run on.
let _ = start_server(settings, shutdown_rx);
});
// how do we know the relay has finished starting up?
Ok(Relay {
port,
handle,
shutdown_tx,
})
}
// check if the server is healthy via HTTP request
async fn server_ready(relay: &Relay) -> Result<bool> {
let uri: String = format!("http://127.0.0.1:{}/", relay.port);
let client = Client::new();
let uri: Uri = uri.parse().unwrap();
let res = client.get(uri).await?;
Ok(res.status() == StatusCode::OK)
}
pub async fn wait_for_healthy_relay(relay: &Relay) -> Result<()> {
// TODO: maximum time to wait for server to become healthy.
// give it a little time to start up before we start polling
tokio::time::sleep(Duration::from_millis(10)).await;
loop {
let server_check = server_ready(relay).await;
match server_check {
Ok(true) => {
// server responded with 200-OK.
break;
}
Ok(false) => {
// server responded with an error, we're done.
return Err(anyhow!("Got non-200-OK from relay"));
}
Err(_) => {
// server is not yet ready, probably connection refused...
debug!("Relay not ready, will try again...");
tokio::time::sleep(Duration::from_millis(10)).await;
}
}
}
info!("relay is ready");
Ok(())
// simple message sent to web browsers
//let mut request = Request::builder()
// .uri("https://www.rust-lang.org/")
// .header("User-Agent", "my-awesome-agent/1.0");
}
// from https://elliotekj.com/posts/2017/07/25/find-available-tcp-port-rust/
// This needed some modification; if multiple tasks all ask for open ports, they will tend to get the same one.
// Instead, we advance a global counter so each caller starts its search from a different port.
static PORT_COUNTER: AtomicU16 = AtomicU16::new(4030);
fn get_available_port() -> Option<u16> {
let startsearch = PORT_COUNTER.fetch_add(10, Ordering::SeqCst);
if startsearch >= 20000 {
// wrap around
PORT_COUNTER.store(4030, Ordering::Relaxed);
}
(startsearch..20000).find(|port| port_is_available(*port))
}
pub fn port_is_available(port: u16) -> bool {
    info!("checking on port {}", port);
    TcpListener::bind(("127.0.0.1", port)).is_ok()
}
}
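The global `AtomicU16` counter is the interesting part of this helper: by reserving a fresh 10-port search window per call with `fetch_add`, concurrently starting tests no longer race toward the same first free port. A reduced sketch of just that allocation scheme (the function name is illustrative):

use std::sync::atomic::{AtomicU16, Ordering};

static PORT_COUNTER: AtomicU16 = AtomicU16::new(4030);

/// Each caller gets a distinct starting point, so two tests that race
/// to find a free port will probe disjoint ranges.
fn next_search_start() -> u16 {
    let start = PORT_COUNTER.fetch_add(10, Ordering::SeqCst);
    if start >= 20000 {
        // wrap around, as in the test helper above
        PORT_COUNTER.store(4030, Ordering::Relaxed);
    }
    start
}

fn main() {
    let a = next_search_start();
    let b = next_search_start();
    assert_ne!(a, b); // concurrent callers begin at different ports
    println!("search windows start at {a} and {b}");
}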

tests/integration_test.rs Normal file (+47)

@@ -0,0 +1,47 @@
use anyhow::Result;
use std::thread;
use std::time::Duration;
mod common;
#[tokio::test]
async fn start_and_stop() -> Result<()> {
// this will be the common pattern for acquiring a new relay:
// start a fresh relay, on a port to-be-provided back to us:
let relay = common::start_relay()?;
// wait for the relay's webserver to start up and deliver a page:
common::wait_for_healthy_relay(&relay).await?;
let port = relay.port;
// just make sure we can startup and shut down.
// if we send a shutdown message before the server is listening,
// we will get a SendError. Keep sending until someone is
// listening.
loop {
let shutdown_res = relay.shutdown_tx.send(());
match shutdown_res {
Ok(()) => {
break;
}
Err(_) => {
thread::sleep(Duration::from_millis(100));
}
}
}
// wait for relay to shutdown
let thread_join = relay.handle.join();
assert!(thread_join.is_ok());
// assert that port is now available.
assert!(common::port_is_available(port));
Ok(())
}
#[tokio::test]
async fn relay_home_page() -> Result<()> {
// get a relay and wait for startup...
let relay = common::start_relay()?;
common::wait_for_healthy_relay(&relay).await?;
// tell relay to shutdown
let _res = relay.shutdown_tx.send(());
Ok(())
}
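Future integration tests can follow the same skeleton: start a relay on an auto-assigned port, wait for it to report healthy, exercise it, then signal shutdown. A hypothetical extra test built only on the helpers from tests/common/mod.rs above (the test name and assertion are illustrative, not part of the repository):

use anyhow::Result;
mod common;

#[tokio::test]
async fn relay_starts_on_distinct_ports() -> Result<()> {
    // two relays started back-to-back should not collide on a port
    let relay_a = common::start_relay()?;
    let relay_b = common::start_relay()?;
    common::wait_for_healthy_relay(&relay_a).await?;
    common::wait_for_healthy_relay(&relay_b).await?;
    assert_ne!(relay_a.port, relay_b.port);
    // tell both relays to shut down
    let _ = relay_a.shutdown_tx.send(());
    let _ = relay_b.shutdown_tx.send(());
    Ok(())
}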