Compare commits

...

184 Commits

Author SHA1 Message Date
Greg Heartsfield
95748647f0 build: bump version to 0.7.13 2022-12-22 16:27:34 -06:00
Greg Heartsfield
25480e837f fix: do not block writers for more than 1 second during checkpoints 2022-12-22 16:10:49 -06:00
Greg Heartsfield
b80b54cd9d improvement: reduce logging, especially for database pool size 2022-12-22 15:47:33 -06:00
Greg Heartsfield
8ea732cbe5 feat: perform regular database maintenance (60sec), without blocking main writer thread 2022-12-22 15:16:21 -06:00
Greg Heartsfield
0f68c4e5c2 refactor: formatting 2022-12-22 15:15:45 -06:00
Greg Heartsfield
dab2cd5792 wip: future changes to rustfmt 2022-12-22 15:13:54 -06:00
Greg Heartsfield
f411aa6fc2 fix: do not re-verify NIP-05 entries where metadata was deleted 2022-12-22 13:01:48 -06:00
Greg Heartsfield
d31bbda087 improvement: reduce lifetime of database connections 2022-12-22 13:01:12 -06:00
Greg Heartsfield
5917bc53b2 improvement: run maintenance every 60 seconds instead of by event count 2022-12-22 11:40:17 -06:00
Greg Heartsfield
91177c61a1 improvement: log reason for new event creation from nip05 2022-12-22 10:48:30 -06:00
Greg Heartsfield
53c2a8051c improvement: reduce logging 2022-12-22 10:29:27 -06:00
Greg Heartsfield
168cf513ac feat: perform full checkpoints and truncate WAL every 2k events 2022-12-22 10:11:05 -06:00
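A sketch of how such a checkpoint can be issued with `rusqlite` (the pragma is standard SQLite; the helper name and the every-2k-events trigger wiring are assumptions):

```rust
use rusqlite::Connection;

// Blocking full checkpoint that also truncates the WAL file; per the
// commit, the writer would call something like this every 2,000 events.
fn checkpoint_and_truncate(conn: &Connection) -> rusqlite::Result<()> {
    conn.execute_batch("PRAGMA wal_checkpoint(TRUNCATE);")
}
```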
Greg Heartsfield
ea204761c9 fix: do not show slow queries more than once per sub 2022-12-20 15:41:50 -06:00
Greg Heartsfield
c270ae1434 improvement: reduce event count for db writer pauses 2022-12-20 15:25:24 -06:00
Greg Heartsfield
64bd983cb6 perf: every 5000 persisted events, pause for 500ms for backups
I have observed backups running for a very long time under heavy load;
this introduces some artificial delay to give the online backup enough
time to make progress.
2022-12-20 15:05:04 -06:00
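A minimal sketch of that pause in a single writer task (the event type, channel wiring, and persistence call are stand-ins, not the relay's actual code):

```rust
use std::time::Duration;
use tokio::sync::mpsc::Receiver;

struct Event; // stand-in for the relay's event type
fn persist(_e: &Event) { /* SQLite write elided */ }

// Every 5000 persisted events, pause for 500ms (numbers from the commit)
// so a concurrently running online backup can make progress.
async fn writer_loop(mut events: Receiver<Event>) {
    let mut persisted: u64 = 0;
    while let Some(event) = events.recv().await {
        persist(&event);
        persisted += 1;
        if persisted % 5000 == 0 {
            tokio::time::sleep(Duration::from_millis(500)).await;
        }
    }
}
```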
Greg Heartsfield
1c153bc784 perf: shed DB query load when queue gets large 2022-12-20 13:23:21 -06:00
Greg Heartsfield
dc11d9a619 improvement: explicitly rollback transaction on duplicate event 2022-12-20 13:23:04 -06:00
Greg Heartsfield
cd1557787b improvement: log write pool 2022-12-20 13:21:57 -06:00
Greg Heartsfield
86bb7aeb9a improvement: function to check pool capacity 2022-12-20 10:07:01 -06:00
Greg Heartsfield
ce37fc1a2d build: bump version to 0.7.12 2022-12-19 14:50:42 -06:00
Greg Heartsfield
2cfd384339 perf: drop db handles that are not quickly read 2022-12-19 00:18:39 -06:00
Greg Heartsfield
8c013107f9 perf: increase upper bound for sqlite mmap 2022-12-18 23:19:43 -06:00
Greg Heartsfield
64a4466d30 perf: backing down on max_blocking_threads 2022-12-18 23:14:41 -06:00
Greg Heartsfield
1596c23eb4 perf: increase blocking threads now that contention is reduced 2022-12-18 22:46:32 -06:00
Greg Heartsfield
129badd4e1 perf: reduce per thread mmap allocation for DB 2022-12-18 22:45:32 -06:00
Greg Heartsfield
6f7c080180 improvement: reduce number of writer blocking threads from 4->2 2022-12-18 22:32:31 -06:00
Greg Heartsfield
af92561ef6 perf: remove shared cache mode (experiment) 2022-12-18 22:15:50 -06:00
Greg Heartsfield
d833a3e40d perf: reduce logging 2022-12-18 22:11:46 -06:00
Greg Heartsfield
462eb46642 build: bump version to 0.7.11 2022-12-18 20:52:01 -06:00
Greg Heartsfield
cf144d503d perf: reduce logging for slow queries 2022-12-18 20:47:11 -06:00
Greg Heartsfield
fb8375aef2 build: bump version to 0.7.10 2022-12-18 13:46:18 -06:00
Greg Heartsfield
88ac31b549 perf: increase channel size for DB communication 2022-12-18 13:44:28 -06:00
Greg Heartsfield
677b7d39e9 improvement: log slow requests that return zero results 2022-12-18 13:42:31 -06:00
Greg Heartsfield
b24d2f9aaa perf: set default blocking threads to lower value 2022-12-18 12:20:57 -06:00
Greg Heartsfield
7a3899d852 build: bump version to 0.7.9 2022-12-18 09:21:07 -06:00
Greg Heartsfield
818108b793 improvement: upgrade multiple dependencies
Updating anyhow v1.0.66 -> v1.0.67
Updating async-trait v0.1.59 -> v0.1.60
Updating cxx v1.0.83 -> v1.0.84
Updating cxx-build v1.0.83 -> v1.0.84
Updating cxxbridge-flags v1.0.83 -> v1.0.84
Updating cxxbridge-macro v1.0.83 -> v1.0.84
Updating itoa v1.0.4 -> v1.0.5
Updating link-cplusplus v1.0.7 -> v1.0.8
Updating proc-macro2 v1.0.47 -> v1.0.48
Updating quote v1.0.21 -> v1.0.22
Updating rustversion v1.0.9 -> v1.0.11
Updating ryu v1.0.11 -> v1.0.12
Updating scratch v1.0.2 -> v1.0.3
Updating serde v1.0.150 -> v1.0.151
Updating serde_derive v1.0.150 -> v1.0.151
Updating serde_json v1.0.89 -> v1.0.90
Updating syn v1.0.105 -> v1.0.106
Updating thiserror v1.0.37 -> v1.0.38
Updating thiserror-impl v1.0.37 -> v1.0.38
Updating unicode-ident v1.0.5 -> v1.0.6
2022-12-18 09:16:09 -06:00
Greg Heartsfield
d10348f7e1 feat: configurable blocking threads 2022-12-18 09:14:04 -06:00
Greg Heartsfield
8598e443d8 wip: add configuration for future feature (client concurrent db limits) 2022-12-17 23:19:48 -06:00
Greg Heartsfield
43222d44e5 feat: perform optimization after seeing many events 2022-12-17 23:18:54 -06:00
Greg Heartsfield
7c1516c4fb perf: add index for tags 2022-12-17 23:17:53 -06:00
Greg Heartsfield
0c72053a49 perf: increase mmap size to 1GB 2022-12-17 23:17:16 -06:00
Greg Heartsfield
3f32ff67ab improvement: minor logging 2022-12-17 23:11:14 -06:00
Greg Heartsfield
0b9778d6ca refactor: simplify tracking of subscriptions 2022-12-17 20:46:58 -06:00
Greg Heartsfield
9be04120c7 build: bump version to 0.7.8 2022-12-17 12:01:43 -06:00
Greg Heartsfield
cc06167e06 perf: add composite index for tag table 2022-12-17 12:01:20 -06:00
Greg Heartsfield
b6e33f044f improvement: limit db connection max lifetime 2022-12-17 10:47:35 -06:00
Greg Heartsfield
1b2c6f9fca build: bump version to 0.7.7 2022-12-17 10:09:44 -06:00
Greg Heartsfield
0d8d39ad22 feat: add rate limiting setting for subscription creation 2022-12-17 09:27:29 -06:00
Greg Heartsfield
0e851d4f71 build: bump version to 0.7.6 2022-12-17 07:51:57 -06:00
Greg Heartsfield
3c880b2f49 perf: pull distinct to outermost SQL 2022-12-17 07:49:28 -06:00
Greg Heartsfield
7a4c9266ec improvement: make hexsearch structs sortable 2022-12-17 07:49:05 -06:00
Greg Heartsfield
e8557d421b build: bump version to 0.7.5 2022-12-16 17:21:00 -06:00
Greg Heartsfield
7ca9c864f2 improvement: DB pool logging shows used connections directly 2022-12-16 17:01:49 -06:00
Greg Heartsfield
838aafd079 improvement: consistent log messages for client/sub ids 2022-12-16 15:22:27 -06:00
Greg Heartsfield
e554b10ac2 improvement: tweak sub/sql logging for slow queries 2022-12-16 14:55:45 -06:00
Greg Heartsfield
b0bfaa48fc improvement: ignore duplicate REQ messages 2022-12-16 14:37:02 -06:00
Greg Heartsfield
2e9b1b6ba7 docs: comment reason for force_no_match 2022-12-16 14:35:21 -06:00
Greg Heartsfield
4d9012d94c improvement: upgrade docker builder and base images 2022-12-16 14:33:08 -06:00
Greg Heartsfield
ffe7aac066 improvement: upgrade multiple dependencies
Updating aho-corasick v0.7.19 -> v0.7.20
Updating async-trait v0.1.58 -> v0.1.59
Updating axum v0.5.17 -> v0.6.1
Updating axum-core v0.2.9 -> v0.3.0
Updating bytes v1.2.1 -> v1.3.0
Updating cc v1.0.76 -> v1.0.78
Updating crossbeam-utils v0.8.12 -> v0.8.14
Updating cxx v1.0.82 -> v1.0.83
Updating cxx-build v1.0.82 -> v1.0.83
Updating cxxbridge-flags v1.0.82 -> v1.0.83
Updating cxxbridge-macro v1.0.82 -> v1.0.83
Updating flate2 v1.0.24 -> v1.0.25
Updating libc v0.2.137 -> v0.2.138
Updating matchit v0.5.0 -> v0.7.0
Updating miniz_oxide v0.5.4 -> v0.6.2
Updating openssl v0.10.42 -> v0.10.44
Updating openssl-sys v0.9.77 -> v0.9.79
Updating parking_lot_core v0.9.4 -> v0.9.5
Updating pest v2.4.1 -> v2.5.1
Updating pest_derive v2.4.1 -> v2.5.1
Updating pest_generator v2.4.1 -> v2.5.1
Updating pest_meta v2.4.1 -> v2.5.1
Updating prost v0.11.2 -> v0.11.3
Adding rustversion v1.0.9
Updating serde v1.0.147 -> v1.0.150
Updating serde_derive v1.0.147 -> v1.0.150
Updating serde_json v1.0.88 -> v1.0.89
Updating sha-1 v0.10.0 -> v0.10.1
Updating syn v1.0.103 -> v1.0.105
Updating tokio v1.22.0 -> v1.23.0
Updating tokio-macros v1.8.0 -> v1.8.2
Updating toml v0.5.9 -> v0.5.10
Updating tonic v0.8.2 -> v0.8.3
Updating tower-http v0.3.4 -> v0.3.5
Updating typenum v1.15.0 -> v1.16.0
2022-12-16 11:17:05 -06:00
Greg Heartsfield
f9695bd0a9 fix: db schema version updates correctly for v9 2022-12-16 10:01:49 -06:00
Greg Heartsfield
7c4bf5cc8f fix: run db migration for v9 2022-12-16 08:21:00 -06:00
Greg Heartsfield
e2de162931 feat: only show SQL in logs for slow queries unless tracing 2022-12-16 08:17:39 -06:00
Greg Heartsfield
4f606615eb perf: indexing improvement 2022-12-16 08:16:49 -06:00
Greg Heartsfield
84a58ebbcd build: bump version to 0.7.3 2022-12-16 06:32:00 -06:00
Greg Heartsfield
c48e45686d perf: schema updates for better event indexing 2022-12-15 08:48:35 -06:00
Greg Heartsfield
bbe359364a refactor: clippy warnings 2022-12-15 08:43:36 -06:00
Greg Heartsfield
9e9c494367 perf: significant query speedup when using kinds.
fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/51
2022-12-14 21:04:49 -06:00
Greg Heartsfield
5fa24bc9f1 fix: send EOSE when ids list is empty in subscriptions
Fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/50
2022-11-19 10:35:00 -06:00
Greg Heartsfield
4de7490d97 fix: send EOSE when authors list is empty in subscriptions
Fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/49
2022-11-19 10:00:38 -06:00
Greg Heartsfield
d0f63dc66e docs: update container instructions for rootless podman 2022-11-19 09:32:26 -06:00
Greg Heartsfield
06078648c8 build: bump version to 0.7.2 2022-11-19 07:55:52 -06:00
Greg Heartsfield
cc0fcc5d66 docs: add Cargo package metadata 2022-11-19 07:32:17 -06:00
Greg Heartsfield
dfb2096653 improvement: build auditable binary in docker 2022-11-19 07:11:39 -06:00
Greg Heartsfield
486508d192 improvement: upgrade multiple dependencies
Updating crates.io index
Updating cc v1.0.74 -> v1.0.76
Updating chrono v0.4.22 -> v0.4.23
Updating cxx v1.0.80 -> v1.0.82
Updating cxx-build v1.0.80 -> v1.0.82
Updating cxxbridge-flags v1.0.80 -> v1.0.82
Updating cxxbridge-macro v1.0.80 -> v1.0.82
Updating digest v0.10.5 -> v0.10.6
Updating hyper v0.14.22 -> v0.14.23
Updating indexmap v1.9.1 -> v1.9.2
Updating regex v1.6.0 -> v1.7.0
Updating regex-syntax v0.6.27 -> v0.6.28
Updating serde_json v1.0.87 -> v1.0.88
Updating tokio v1.21.2 -> v1.22.0
Updating uuid v1.2.1 -> v1.2.2
2022-11-19 06:52:06 -06:00
Greg Heartsfield
84b43c144b improvement: use locked cargo packages to build container images 2022-11-19 06:29:13 -06:00
Greg Heartsfield
110500bb46 feat(NIP-20): advertise support for NIP-20 in relay info/readme 2022-11-12 09:22:43 -06:00
Greg Heartsfield
83f6b11de7 refactor: clippy fix 2022-11-12 09:22:24 -06:00
William Casarin
6d1244434b feat(NIP-20): improve invalid event error messages
Instead of returning a NOTICE for invalid events, return an `OK false`
command result with a reason why the event is invalid.
2022-11-12 09:13:22 -06:00
William Casarin
5a91419d34 feat(NIP-20): send command results to clients
When submitting events to relays, clients currently have no way to know
if an event was successfully committed to the database. This NIP
introduces the concept of command results, which are like NOTICEs except
that they provide more information about whether an event was accepted
or rejected.

A command result is a JSON object with the following structure that is
returned when an event is successfully saved to the database or
rejected:

	["OK", <event_id>, <true|false>, <message>]

nip20: https://github.com/nostr-protocol/nips/pull/62
2022-11-12 09:12:35 -06:00
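For illustration, a command result of this shape can be produced with `serde_json` (already a dependency); the helper name and message text are hypothetical:

```rust
use serde_json::json;

// Build a NIP-20 command result: ["OK", <event_id>, <true|false>, <message>].
fn command_result(event_id: &str, accepted: bool, message: &str) -> String {
    json!(["OK", event_id, accepted, message]).to_string()
}
```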
William Casarin
7adc5c9af7 perf: don't create intermediate vecs when matching subs
Avoid creating intermediate vectors when matching subscriptions. We can
just iterate over the hashmap directly.
2022-11-09 07:30:43 -06:00
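Roughly the shape of the change (stand-in types; `interested_in_event` matches the method visible in the `src/conn.rs` diff below):

```rust
use std::collections::HashMap;

struct Event; // stand-ins for the relay's real types
struct Subscription;
impl Subscription {
    fn interested_in_event(&self, _e: &Event) -> bool { true }
}

// Instead of collecting matching subscription ids into an intermediate
// Vec, iterate the subscription map directly and act on each match.
fn notify_matches(subs: &HashMap<String, Subscription>, event: &Event) {
    for (id, sub) in subs {
        if sub.interested_in_event(event) {
            let _ = id; // send the event for this subscription (elided)
        }
    }
}
```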
Greg Heartsfield
9dd4571bee refactor: reduce level of some common DB logs 2022-11-06 13:49:32 -06:00
Greg Heartsfield
9db5a26b9c refactor: more consistent logging messages 2022-11-05 16:11:20 -05:00
Greg Heartsfield
ac345b5744 refactor: do not quote server-generated client id in logs 2022-11-05 15:59:39 -05:00
Greg Heartsfield
675662c7fb improvement: upgrade docker builder and base images 2022-11-05 13:24:17 -05:00
Greg Heartsfield
505b0cb71f improvement: upgrade multiple dependencies
Updating anyhow v1.0.65 -> v1.0.66
Updating async-trait v0.1.57 -> v0.1.58
Updating axum v0.5.16 -> v0.5.17
Updating axum-core v0.2.8 -> v0.2.9
Updating base64 v0.13.0 -> v0.13.1
Updating bumpalo v3.11.0 -> v3.11.1
Updating cc v1.0.73 -> v1.0.74
Updating cxx v1.0.79 -> v1.0.80
Updating cxx-build v1.0.79 -> v1.0.80
Updating cxxbridge-flags v1.0.79 -> v1.0.80
Updating cxxbridge-macro v1.0.79 -> v1.0.80
Updating futures v0.3.24 -> v0.3.25
Updating futures-channel v0.3.24 -> v0.3.25
Updating futures-core v0.3.24 -> v0.3.25
Updating futures-executor v0.3.24 -> v0.3.25
Updating futures-io v0.3.24 -> v0.3.25
Updating futures-macro v0.3.24 -> v0.3.25
Updating futures-sink v0.3.24 -> v0.3.25
Updating futures-task v0.3.24 -> v0.3.25
Updating futures-util v0.3.24 -> v0.3.25
Updating getrandom v0.2.7 -> v0.2.8
Updating h2 v0.3.14 -> v0.3.15
Updating hyper v0.14.20 -> v0.14.22
Updating iana-time-zone v0.1.51 -> v0.1.53
Updating libc v0.2.135 -> v0.2.137
Updating mio v0.8.4 -> v0.8.5
Updating native-tls v0.2.10 -> v0.2.11
Updating num_cpus v1.13.1 -> v1.14.0
Updating once_cell v1.15.0 -> v1.16.0
Updating openssl-sys v0.9.76 -> v0.9.77
Updating parking_lot_core v0.9.3 -> v0.9.4
Updating pest v2.4.0 -> v2.4.1
Updating pest_derive v2.4.0 -> v2.4.1
Updating pest_generator v2.4.0 -> v2.4.1
Updating pest_meta v2.4.0 -> v2.4.1
Updating pkg-config v0.3.25 -> v0.3.26
Updating ppv-lite86 v0.2.16 -> v0.2.17
Updating prost v0.11.0 -> v0.11.2
Updating prost-derive v0.11.0 -> v0.11.2
Updating prost-types v0.11.1 -> v0.11.2
Updating serde v1.0.145 -> v1.0.147
Updating serde_derive v1.0.145 -> v1.0.147
Updating serde_json v1.0.86 -> v1.0.87
Updating syn v1.0.102 -> v1.0.103
  Adding windows-sys v0.42.0
  Adding windows_aarch64_gnullvm v0.42.0
  Adding windows_aarch64_msvc v0.42.0
  Adding windows_i686_gnu v0.42.0
  Adding windows_i686_msvc v0.42.0
  Adding windows_x86_64_gnu v0.42.0
  Adding windows_x86_64_gnullvm v0.42.0
  Adding windows_x86_64_msvc v0.42.0
2022-11-05 10:59:03 -05:00
Greg Heartsfield
e8aa450802 build: bump version to 0.7.1 2022-11-05 10:35:38 -05:00
Greg Heartsfield
5a8860bb09 feat: log user-agent if present 2022-11-05 10:29:25 -05:00
Greg Heartsfield
11e43eccf9 refactor: add unit to ping_interval config 2022-11-05 07:42:08 -05:00
William Casarin
50577b2dfa feat: add network.ping_interval setting
Add a ping interval setting that allows you to customize the websocket
ping interval. The default of 5 minutes may be too high for some proxy
servers that disconnect connections that are held open for too long.
2022-11-05 07:40:28 -05:00
William Casarin
a6cb6f8486 refactor: rename get_header_remote_ip -> get_header_string
This function has nothing to do with remote ips!
2022-11-05 07:37:18 -05:00
Greg Heartsfield
ae5bf98d87 feat: retrieve client IP from header in config.toml
If the config.toml has defined a HTTP header to look for a remote IP,
that will be logged.  Otherwise, the socket address IP will be used.

closes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/47
2022-11-04 18:05:01 -05:00
William Casarin
1cf9d719f0 feat: look for proxied ip headers
This enables support for using the proxied IP from cloudflare. The damus
relay is behind cloudflare, so to get accurate remote ip logging we need
to look at the headers instead of the socket address.

Signed-off-by: William Casarin <jb55@jb55.com>
2022-11-04 17:09:28 -05:00
William Casarin
311f4b5283 refactor: switch new connections to debug log
These are pretty spammy on busy relays. I've been using the info log to
monitor spam attacks, and these are the least useful info logs.

Leave the "stopping connection" log because it at least provides useful
sent/received information.

Signed-off-by: William Casarin <jb55@jb55.com>
2022-11-04 07:59:53 -05:00
Greg Heartsfield
14b5a51e3a fix: log ephemeral events after send 2022-11-04 07:55:38 -05:00
Greg Heartsfield
8ecce3f566 feat: show client IP in logs 2022-11-02 18:33:44 -05:00
Greg Heartsfield
caffbbbede build: bump version to 0.7.0 2022-10-16 15:42:11 -05:00
Greg Heartsfield
81045ad3d0 improvement: upgrade multiple dependencies
Updating anyhow v1.0.64 -> v1.0.65
  Adding codespan-reporting v0.11.1
Updating const_format v0.2.28 -> v0.2.30
Updating const_format_proc_macros v0.2.22 -> v0.2.29
Updating crossbeam-utils v0.8.11 -> v0.8.12
  Adding cxx v1.0.79
  Adding cxx-build v1.0.79
  Adding cxxbridge-flags v1.0.79
  Adding cxxbridge-macro v1.0.79
Updating digest v0.10.3 -> v0.10.5
Updating hdrhistogram v7.5.1 -> v7.5.2
Updating iana-time-zone v0.1.50 -> v0.1.51
  Adding iana-time-zone-haiku v0.1.1
Updating itertools v0.10.3 -> v0.10.5
Updating itoa v1.0.3 -> v1.0.4
Updating js-sys v0.3.59 -> v0.3.60
Updating libc v0.2.132 -> v0.2.135
  Adding link-cplusplus v1.0.7
Updating lock_api v0.4.8 -> v0.4.9
Updating once_cell v1.14.0 -> v1.15.0
Updating openssl v0.10.41 -> v0.10.42
Updating openssl-sys v0.9.75 -> v0.9.76
Updating pest v2.3.0 -> v2.4.0
Updating pest_derive v2.3.0 -> v2.4.0
Updating pest_generator v2.3.0 -> v2.4.0
Updating pest_meta v2.3.0 -> v2.4.0
Updating proc-macro2 v1.0.43 -> v1.0.47
Updating rand_core v0.6.3 -> v0.6.4
Updating raw-cpuid v10.5.0 -> v10.6.0
  Adding scratch v1.0.2
Updating serde v1.0.144 -> v1.0.145
Updating serde_derive v1.0.144 -> v1.0.145
Updating serde_json v1.0.85 -> v1.0.86
  Adding sha1 v0.10.5
Updating smallvec v1.9.0 -> v1.10.0
Updating syn v1.0.99 -> v1.0.102
  Adding termcolor v1.1.3
Updating thiserror v1.0.34 -> v1.0.37
Updating thiserror-impl v1.0.34 -> v1.0.37
Updating tokio v1.21.0 -> v1.21.2
Updating tokio-stream v0.1.9 -> v0.1.11
Updating tonic v0.8.1 -> v0.8.2
Updating tower-layer v0.3.1 -> v0.3.2
Updating tracing v0.1.36 -> v0.1.37
Updating tracing-attributes v0.1.22 -> v0.1.23
Updating tracing-core v0.1.29 -> v0.1.30
Updating tracing-subscriber v0.3.15 -> v0.3.16
Updating unicode-ident v1.0.3 -> v1.0.5
Updating unicode-normalization v0.1.21 -> v0.1.22
  Adding unicode-width v0.1.10
Updating uuid v1.1.2 -> v1.2.1
Updating wasm-bindgen v0.2.82 -> v0.2.83
Updating wasm-bindgen-backend v0.2.82 -> v0.2.83
Updating wasm-bindgen-macro v0.2.82 -> v0.2.83
Updating wasm-bindgen-macro-support v0.2.82 -> v0.2.83
Updating wasm-bindgen-shared v0.2.82 -> v0.2.83
Updating web-sys v0.3.59 -> v0.3.60
  Adding winapi-util v0.1.5
2022-10-16 15:33:11 -05:00
Greg Heartsfield
72f8a1aa5c feat(NIP-26): allow searches for delegated public keys
Implements core NIP-26 delegated event functionality.  Events can
include a `delegation` tag that provides a signature and restrictions
on which events can be delegated.

Notable points on the implementation so far:

* Schema has been upgraded to include an index and new column.
* Basic rune parsing/evaluation to implement the example event in the
  NIP, but no more.
* No special logic for deletion.
* No migration logic for determining delegated authors for
  already-stored events.
2022-10-16 15:25:06 -05:00
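For reference, the delegation tag evaluated here has the following shape in NIP-26 (placeholders, not actual values):

	["delegation", <delegator pubkey>, <conditions>, <signature>]

where `<conditions>` is a query-string-like rune such as `kind=1&created_at>1674834236`; this condition string is illustrative.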
Greg Heartsfield
274c61bb72 improvement: upgrade docker images for base & builder 2022-10-13 18:42:55 -05:00
Greg Heartsfield
6eeefbcc4c feat: quick script for making non-x86 Dockerfiles 2022-10-13 18:35:33 -05:00
Greg Heartsfield
3e8adf978f refactor: move db migrations into isolated functions 2022-10-09 08:54:03 -05:00
Greg Heartsfield
2af5f9fbe8 fix: correct schema upgrade logic (and refactor)
Schema upgrades were buggy from 4->5 (the v5 would be skipped).  This
change also refactors the logic slightly so that future additions can
be clearer (no need to have if and else-if combinations).
2022-10-09 08:24:01 -05:00
Greg Heartsfield
2739e49362 fix: correct future schema version detection 2022-10-08 13:15:48 -05:00
Greg Heartsfield
f9693f7ac3 fix(NIP-9): hide events received after their deletions
fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/40
2022-10-08 12:12:41 -05:00
Greg Heartsfield
8a63d88b0b fix: prevent deletion of deletion events 2022-10-08 08:02:16 -05:00
Greg Heartsfield
a4df9445b6 test: improve port selection 2022-09-28 07:55:06 -05:00
Greg Heartsfield
92da9d71f8 feat: handle and log TERM signals 2022-09-28 07:20:31 -05:00
Greg Heartsfield
6633f8b472 feat: replace logging with tracing 2022-09-28 07:19:59 -05:00
Greg Heartsfield
93dfed0a87 refactor: misc clippy suggestions 2022-09-24 19:28:02 -05:00
Greg Heartsfield
bef7ca7e27 refactor: misc clippy suggestions 2022-09-24 09:19:16 -05:00
Greg Heartsfield
a98708ba47 refactor: misc clippy suggestions 2022-09-24 09:01:09 -05:00
Greg Heartsfield
ccf9b8d47b refactor: remove unnecessary return types 2022-09-24 08:39:41 -05:00
Greg Heartsfield
8fa58de49a refactor: clippy suggestions 2022-09-24 08:30:22 -05:00
Greg Heartsfield
480c5e4e58 docs: un-link NIP-22 note 2022-09-19 19:34:11 -05:00
dzdidi
5bd00f9107 docs: add refs for nostr-protocol organization
Signed-off-by: dzdidi <deniszalessky@gmail.com>
2022-09-19 19:26:36 -05:00
Greg Heartsfield
36b9f628c7 test: check for relay health after startup 2022-09-17 16:02:57 -05:00
Greg Heartsfield
baeb77af99 test: dynamically find open port for test relay 2022-09-17 14:36:05 -05:00
Greg Heartsfield
29b1e8ce58 refactor: move common test code into module 2022-09-17 12:37:49 -05:00
Greg Heartsfield
786a354776 test: simple integration test to start and stop relay 2022-09-11 12:54:24 -05:00
Greg Heartsfield
4fa8616c73 feat: enable use of tokio-console with diagnostics.tracing setting
View real-time tokio diagnostics by setting the configuration option
"diagnostics.tracing" to true.
2022-09-11 12:44:45 -05:00
Greg Heartsfield
74802522c2 improvement: do not create NIP-05 thread if feature is disabled 2022-09-11 11:01:36 -05:00
Greg Heartsfield
9ce5057af8 improvement: better log formatting 2022-09-11 10:22:01 -05:00
Greg Heartsfield
217429f538 build: add release flags, save artifacts 2022-09-11 10:21:29 -05:00
Greg Heartsfield
62a9548c27 docs: show build status for master branch only 2022-09-10 22:53:41 -05:00
Greg Heartsfield
c24dce8177 docs: add build status indicator 2022-09-10 22:48:23 -05:00
Greg Heartsfield
3503cf05ed build: add sr.ht build manifest 2022-09-10 22:43:56 -05:00
Greg Heartsfield
8738e5baa9 improvement: upgrade multiple dependencies
Updating aho-corasick v0.7.18 -> v0.7.19
Updating block-buffer v0.10.2 -> v0.10.3
Updating cpufeatures v0.2.4 -> v0.2.5
Updating form_urlencoded v1.0.1 -> v1.1.0
Updating idna v0.2.3 -> v0.3.0
Removing matches v0.1.9
Updating percent-encoding v2.1.0 -> v2.2.0
Updating thiserror v1.0.33 -> v1.0.34
Updating thiserror-impl v1.0.33 -> v1.0.34
Updating tokio-util v0.7.3 -> v0.7.4
Updating ucd-trie v0.1.4 -> v0.1.5
Updating url v2.2.2 -> v2.3.1
2022-09-10 22:42:52 -05:00
Greg Heartsfield
78da92ccca feat: advertise support for NIP-09 and NIP-12 in relay info
NIP-01 prefix search and NIP-12 generic tags are no longer marked as
experimental.

NIP-11 relay info advertises NIP-09 event deletion and NIP-12 generic
tag search support.
2022-09-10 20:45:09 -05:00
Greg Heartsfield
72f1c19b21 feat(NIP-22): advertise support for event created_at limits
The `reject_future_limits` option can now be disabled, and is by
default.

NIP-11 advertises support for created_at limits.

The message for future-dated events has been modified, to be closer to
the recommended example in the NIP.
2022-09-10 20:40:10 -05:00
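The check itself is small; a sketch using the `reject_future_seconds` option shown in `config.toml` (the function name and exact wiring are illustrative):

```rust
// NIP-22 future-date check: reject an event whose created_at is more
// than the configured number of seconds ahead of "now". A setting of
// None (the new default) allows any date.
fn too_far_in_future(created_at: u64, now: u64, reject_future_seconds: Option<u64>) -> bool {
    match reject_future_seconds {
        Some(max) => created_at > now + max,
        None => false,
    }
}
```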
Greg Heartsfield
283967f8cc docs: reference NIP-28 channel 2022-09-10 19:45:23 -05:00
Greg Heartsfield
08b011ad07 feat: ensure that WAL is truncated after checkpoint 2022-09-10 19:18:57 -05:00
Greg Heartsfield
2b03f11e5e refactor: remove global/singleton settings object 2022-09-06 06:12:07 -05:00
Greg Heartsfield
e48bae10e6 feat: support in-memory SQLite database 2022-09-06 06:06:01 -05:00
Greg Heartsfield
8774416b92 refactor: move nostr server into library 2022-09-06 05:56:04 -05:00
Greg Heartsfield
59933ce25e build: add pre-commit config 2022-09-06 05:44:22 -05:00
Greg Heartsfield
1b9f364e15 chore: rustfmt 2022-09-02 12:38:31 -05:00
Greg Heartsfield
4d983dd1e0 improvement: upgrade uuid dependency 2022-09-02 12:37:11 -05:00
Greg Heartsfield
11c33582ef improvement: remove useless carats from Cargo.toml deps 2022-09-02 12:35:02 -05:00
Greg Heartsfield
a754477a02 improvement: misc refactorings (clippy) 2022-09-02 12:26:00 -05:00
Greg Heartsfield
a843eaa939 improvement: db.rs from clippy 2022-09-02 10:30:51 -05:00
Greg Heartsfield
03a130b0b8 improvement: simplify config builder (clippy) 2022-09-02 10:18:16 -05:00
Greg Heartsfield
9124f4540a improvement: upgrade multiple dependencies
Updating cpufeatures v0.2.3 -> v0.2.4
Updating dashmap v5.3.4 -> v5.4.0
Updating futures v0.3.23 -> v0.3.24
Updating futures-channel v0.3.23 -> v0.3.24
Updating futures-core v0.3.23 -> v0.3.24
Updating futures-executor v0.3.23 -> v0.3.24
Updating futures-io v0.3.23 -> v0.3.24
Updating futures-macro v0.3.23 -> v0.3.24
Updating futures-sink v0.3.23 -> v0.3.24
Updating futures-task v0.3.23 -> v0.3.24
Updating futures-util v0.3.23 -> v0.3.24
Updating httparse v1.7.1 -> v1.8.0
Updating lock_api v0.4.7 -> v0.4.8
Updating once_cell v1.13.1 -> v1.14.0
Updating pest v2.2.1 -> v2.3.0
Updating pest_derive v2.2.1 -> v2.3.0
Updating pest_generator v2.2.1 -> v2.3.0
Updating pest_meta v2.2.1 -> v2.3.0
Updating serde v1.0.143 -> v1.0.144
Updating serde_derive v1.0.143 -> v1.0.144
Updating serde_json v1.0.83 -> v1.0.85
Updating socket2 v0.4.4 -> v0.4.7
Updating thiserror v1.0.32 -> v1.0.33
Updating thiserror-impl v1.0.32 -> v1.0.33
Updating tokio v1.20.1 -> v1.21.0
2022-09-02 10:08:14 -05:00
slaninas
77892b2064 fix: syntax error 2022-08-22 05:12:52 -07:00
Greg Heartsfield
4fe6191aa3 chore: formatting 2022-08-21 09:51:34 -07:00
Greg Heartsfield
79a982e3ef improvement: send NOTICE for too-large messages 2022-08-21 09:28:31 -07:00
Greg Heartsfield
01d81db617 improvement: log client id for subscription removal 2022-08-21 09:11:38 -07:00
Greg Heartsfield
e6fef37d4e chore: rustfmt 2022-08-21 09:10:19 -07:00
plantimals
4bbfd77fc1 docs: add NGINX configuration example
resolves https://github.com/scsibug/nostr-rs-relay/issues/12
2022-08-20 09:35:01 -07:00
Greg Heartsfield
8da6f6555a build: bump version to 0.6.2 2022-08-18 17:52:16 -07:00
Greg Heartsfield
5bcc63bd56 improvement: upgrade multiple dependencies
Updating async-trait v0.1.56 -> v0.1.57
Removing block-buffer v0.7.3
Removing block-padding v0.1.5
Updating bumpalo v3.10.0 -> v3.11.0
Removing byte-tools v0.3.1
Updating bytes v1.1.0 -> v1.2.1
Updating cpufeatures v0.2.2 -> v0.2.3
Updating crossbeam-utils v0.8.10 -> v0.8.11
Updating crypto-common v0.1.4 -> v0.1.6
Removing digest v0.8.1
Removing fake-simd v0.1.2
Updating fastrand v1.7.0 -> v1.8.0
Updating futures v0.3.21 -> v0.3.23
Updating futures-channel v0.3.21 -> v0.3.23
Updating futures-core v0.3.21 -> v0.3.23
Updating futures-executor v0.3.21 -> v0.3.23
Updating futures-io v0.3.21 -> v0.3.23
Updating futures-macro v0.3.21 -> v0.3.23
Updating futures-sink v0.3.21 -> v0.3.23
Updating futures-task v0.3.21 -> v0.3.23
Updating futures-util v0.3.21 -> v0.3.23
Removing generic-array v0.12.4
Removing generic-array v0.14.5
Adding   generic-array v0.14.6
Updating h2 v0.3.13 -> v0.3.14
Updating hashbrown v0.12.1 -> v0.12.3
Updating hyper v0.14.19 -> v0.14.20
Updating itoa v1.0.2 -> v1.0.3
Updating js-sys v0.3.58 -> v0.3.59
Updating libc v0.2.126 -> v0.2.132
Removing maplit v1.0.2
Updating once_cell v1.12.1 -> v1.13.1
Removing opaque-debug v0.2.3
Updating openssl v0.10.40 -> v0.10.41
Updating openssl-sys v0.9.74 -> v0.9.75
Updating pest v2.1.3 -> v2.2.1
Updating pest_derive v2.1.0 -> v2.2.1
Updating pest_generator v2.1.3 -> v2.2.1
Updating pest_meta v2.1.3 -> v2.2.1
Updating proc-macro2 v1.0.40 -> v1.0.43
Updating quote v1.0.20 -> v1.0.21
Updating raw-cpuid v10.3.0 -> v10.5.0
Updating redox_syscall v0.2.13 -> v0.2.16
Updating regex v1.5.6 -> v1.6.0
Updating regex-syntax v0.6.26 -> v0.6.27
Updating ryu v1.0.10 -> v1.0.11
Updating security-framework v2.6.1 -> v2.7.0
Updating serde v1.0.138 -> v1.0.143
Updating serde_derive v1.0.138 -> v1.0.143
Updating serde_json v1.0.82 -> v1.0.83
Removing sha-1 v0.8.2
Updating slab v0.4.6 -> v0.4.7
Updating syn v1.0.98 -> v1.0.99
Updating thiserror v1.0.31 -> v1.0.32
Updating thiserror-impl v1.0.31 -> v1.0.32
Updating tokio v1.19.2 -> v1.20.1
Updating tokio-tungstenite v0.17.1 -> v0.17.2
Updating tracing v0.1.35 -> v0.1.36
Updating tracing-core v0.1.28 -> v0.1.29
Updating tungstenite v0.17.2 -> v0.17.3
Updating ucd-trie v0.1.3 -> v0.1.4
Updating unicode-ident v1.0.1 -> v1.0.3
Updating wasm-bindgen v0.2.81 -> v0.2.82
Updating wasm-bindgen-backend v0.2.81 -> v0.2.82
Updating wasm-bindgen-macro v0.2.81 -> v0.2.82
Updating wasm-bindgen-macro-support v0.2.81 -> v0.2.82
Updating wasm-bindgen-shared v0.2.81 -> v0.2.82
Updating web-sys v0.3.58 -> v0.3.59
2022-08-18 17:21:53 -07:00
Greg Heartsfield
035cf34673 fix(NIP-12): correctly search for mixed-case hex-like tags
Only lowercase and even-length tag values are stored as binary BLOBs.
Previously there was an error which prevented search results from being
returned if the tag value was mixed-case and could be interpreted as hex.

A new database migration has been created to repair the `tag` table
for existing relays.

fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/37
2022-08-17 16:34:11 -07:00
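A sketch of the storage rule described above (the helper name is illustrative; the relay's actual predicate may differ):

```rust
// Per the commit: only lowercase, even-length hex-like tag values are
// stored as binary BLOBs; anything else (e.g. mixed-case hex) must be
// stored and matched as plain text.
fn stores_as_blob(value: &str) -> bool {
    !value.is_empty()
        && value.len() % 2 == 0
        && value.chars().all(|c| matches!(c, '0'..='9' | 'a'..='f'))
}
```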
Greg Heartsfield
be8170342e fix(NIP-12): multi-tag searches return correct results
Logic of generated SQL was incorrect, causing multiple tag searches
(as defined in NIP-12) to produce no results.

fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/36
2022-08-11 22:16:10 -07:00
Greg Heartsfield
0a3b15f41f fix(NIP-11): Add CORS header and content type for main page 2022-08-11 19:33:17 -07:00
Kirill Kovalenko
2b4b17dbda fix: windows compilation with bundled sqlite3
Using 'bundled' is recommended by
https://github.com/rusqlite/rusqlite#usage to avoid common build
issues
2022-08-07 10:35:36 -05:00
Greg Heartsfield
5058d98ad6 fix(NIP-12): only allow single-char tag filters 2022-08-07 10:15:36 -05:00
Greg Heartsfield
f4ecd43708 build: bump version to 0.6.1 2022-07-04 17:41:16 -05:00
Greg Heartsfield
a8f465fdc8 improvement: upgrade docker base images (and specify explicit repository) 2022-07-04 17:35:17 -05:00
Greg Heartsfield
1c14adc766 fix(NIP-01): allow limits on a per-filter basis
The original implementation of subscription limit applied to the
entire query, instead of the specific filter.  Now, each filter gets
its own query limit.  When a limit is applied, the most recent N
events will be returned, otherwise the default is to return the
earliest events (in order), for all matching events.
2022-07-04 17:25:32 -05:00
Greg Heartsfield
e894a86566 docs: NIP-15, NIP-16 feature notes in README 2022-07-04 13:10:48 -05:00
Greg Heartsfield
bedc378624 improvement: upgrade multiple dependencies
Updating async-trait v0.1.53 -> v0.1.56
Updating bumpalo v3.9.1 -> v3.10.0
Updating crossbeam-utils v0.8.8 -> v0.8.10
Updating crypto-common v0.1.3 -> v0.1.4
Updating getrandom v0.2.6 -> v0.2.7
Updating http v0.2.7 -> v0.2.8
Updating indexmap v1.8.2 -> v1.9.1
Updating js-sys v0.3.57 -> v0.3.58
Updating linked-hash-map v0.5.4 -> v0.5.6
Updating mio v0.8.3 -> v0.8.4
Updating once_cell v1.12.0 -> v1.12.1
Updating openssl-sys v0.9.73 -> v0.9.74
Removing parking_lot v0.11.2
Removing parking_lot_core v0.8.5
Updating proc-macro2 v1.0.39 -> v1.0.40
Updating quote v1.0.18 -> v1.0.20
Updating r2d2 v0.8.9 -> v0.8.10
Updating ron v0.7.0 -> v0.7.1
Updating serde v1.0.137 -> v1.0.138
Updating serde_derive v1.0.137 -> v1.0.138
Updating serde_json v1.0.81 -> v1.0.82
Updating smallvec v1.8.0 -> v1.9.0
Updating syn v1.0.95 -> v1.0.98
Updating tokio v1.18.2 -> v1.19.2
Updating tokio-macros v1.7.0 -> v1.8.0
Updating tokio-util v0.7.2 -> v0.7.3
Updating tower-service v0.3.1 -> v0.3.2
Updating tracing v0.1.34 -> v0.1.35
Removing tracing-attributes v0.1.21
Updating tracing-core v0.1.26 -> v0.1.28
Updating unicode-ident v1.0.0 -> v1.0.1
Updating unicode-normalization v0.1.19 -> v0.1.21
Updating wasm-bindgen v0.2.80 -> v0.2.81
Updating wasm-bindgen-backend v0.2.80 -> v0.2.81
Updating wasm-bindgen-macro v0.2.80 -> v0.2.81
Updating wasm-bindgen-macro-support v0.2.80 -> v0.2.81
Updating wasm-bindgen-shared v0.2.80 -> v0.2.81
Updating web-sys v0.3.57 -> v0.3.58
2022-07-04 12:56:10 -05:00
Greg Heartsfield
e1c2a6b758 improvement: upgrade docker base image 2022-05-30 21:53:46 -05:00
Greg Heartsfield
990bb656e8 improvement: upgrade multiple dependencies
Cargo updated the following dependencies:

Updating dashmap v5.3.3 -> v5.3.4
Updating http-body v0.4.4 -> v0.4.5
Updating hyper v0.14.18 -> v0.14.19
Updating indexmap v1.8.1 -> v1.8.2
Updating itoa v1.0.1 -> v1.0.2
Updating libc v0.2.125 -> v0.2.126
Updating once_cell v1.10.0 -> v1.12.0
Updating parking_lot v0.12.0 -> v0.12.1
Updating proc-macro2 v1.0.38 -> v1.0.39
Updating regex v1.5.5 -> v1.5.6
Updating regex-syntax v0.6.25 -> v0.6.26
Updating ryu v1.0.9 -> v1.0.10
Updating schannel v0.1.19 -> v0.1.20
Updating scheduled-thread-pool v0.2.5 -> v0.2.6
Updating syn v1.0.93 -> v1.0.95
Updating tokio-util v0.7.1 -> v0.7.2

Adding unicode-ident v1.0.0

Removing unicode-xid v0.2.3
2022-05-30 21:47:24 -05:00
Semisol
168cfc3b26 feat(NIP-16): Implement NIP16
NIP16 introduces a replaceable and ephemeral event range:
[10000..20000) for replaceable and [20000..30000) for
ephemeral.
2022-05-30 21:43:06 -05:00
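The ranges are straightforward to express (a sketch; function names are illustrative):

```rust
// NIP-16 kind ranges from the commit: [10000..20000) is replaceable,
// [20000..30000) is ephemeral.
fn is_replaceable(kind: u64) -> bool {
    (10_000..20_000).contains(&kind)
}

fn is_ephemeral(kind: u64) -> bool {
    (20_000..30_000).contains(&kind)
}
```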
Semisol
a36ad378f6 feat(NIP-15): Implement NIP15
NIP15 sends an EOSE notice to clients after all stored events are sent
to allow loading indicators and other use cases.
2022-05-30 21:43:00 -05:00
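The notice itself is a two-element JSON array; a minimal sketch with `serde_json` (helper name illustrative):

```rust
use serde_json::json;

// NIP-15 end-of-stored-events notice, sent after all stored events
// for a subscription have been delivered.
fn eose_message(sub_id: &str) -> String {
    json!(["EOSE", sub_id]).to_string()
}
```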
Greg Heartsfield
538d139ebf improvement: upgrade docker base image 2022-05-10 21:24:22 -05:00
Greg Heartsfield
23f7730fea build: bump version to 0.6.0 2022-05-10 21:19:21 -05:00
Greg Heartsfield
8aa1256254 improvement: upgrade multiple dependencies 2022-05-10 17:07:18 -05:00
Greg Heartsfield
9ed3391b46 fix(NIP-09): correct WHERE clause for event deletion 2022-05-10 16:50:52 -05:00
William Casarin
4ad483090e feat(NIP-01): Implement limit
This was quickly sneaked in by fiatjaf per my request[0]; it makes many
queries more efficient and allows for paging when combined with `until`.

It is a bit weird to have multiple limits on each filter... for now we
just choose any or the last limit seen.

[0]: a4aea5337f

Signed-off-by: William Casarin <jb55@jb55.com>
2022-05-10 16:47:56 -05:00
Greg Heartsfield
9b351aab9b docs: update devel discussion link 2022-02-28 17:19:24 -06:00
Greg Heartsfield
597749890e improvement: remove unnecessary event logging 2022-02-27 19:30:48 -06:00
Greg Heartsfield
1d499cf12b feat: handle NIP-09 for deletion events 2022-02-27 11:35:23 -06:00
Greg Heartsfield
ed3a6b9692 refactor: simplify NOTICE messages 2022-02-26 17:34:58 -06:00
Greg Heartsfield
048199e30b build: bump version to 0.5.2 2022-02-26 11:22:16 -06:00
Greg Heartsfield
414e83f696 refactor: import cleanup for config 2022-02-26 11:16:12 -06:00
Greg Heartsfield
225c8f762e improvement: upgrade dependencies; config, tungstenite, tokio 2022-02-26 09:55:12 -06:00
Greg Heartsfield
887fc28ab2 fix: until filters in subscriptions now used 2022-02-26 09:15:45 -06:00
Greg Heartsfield
294d3b99c3 fix: correct imports for test cases 2022-02-26 09:07:07 -06:00
Greg Heartsfield
53990672ae improvement: move db pool operations closer to query, do not panic on failure 2022-02-23 16:38:16 -06:00
Greg Heartsfield
9c1b21cbfe improvement: more granular perf logging for SQL queries 2022-02-21 09:03:05 -06:00
Greg Heartsfield
2f63417646 improvement: better logging for connection resets 2022-02-21 08:57:07 -06:00
Greg Heartsfield
3b25160852 fix: abort on connection IO errors 2022-02-21 08:50:46 -06:00
Greg Heartsfield
34ad549cde fix: update event buffer size comment in config 2022-02-20 11:46:24 -06:00
Greg Heartsfield
f8b1fe5035 docs: line up comments with code 2022-02-17 16:18:05 -06:00
30 changed files with 4192 additions and 1537 deletions

.build.yml (new file, +19)

@@ -0,0 +1,19 @@
image: fedora/latest
arch: x86_64
artifacts:
- nostr-rs-relay/target/release/nostr-rs-relay
environment:
RUST_LOG: debug
packages:
- cargo
- sqlite-devel
sources:
- https://git.sr.ht/~gheartsfield/nostr-rs-relay/
shell: false
tasks:
- build: |
cd nostr-rs-relay
cargo build --release
- test: |
cd nostr-rs-relay
cargo test --release

.cargo/config.toml (new file, +2)

@@ -0,0 +1,2 @@
[build]
rustflags = ["--cfg", "tokio_unstable"]

.pre-commit-config.yaml (new file, +16)

@@ -0,0 +1,16 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.3.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: check-added-large-files
- repo: https://github.com/doublify/pre-commit-rust
rev: v1.0
hooks:
- id: fmt
- id: cargo-check
- id: clippy

Cargo.lock (generated, +1601): diff suppressed because it is too large.

Cargo.toml

@@ -1,32 +1,46 @@
[package]
name = "nostr-rs-relay"
version = "0.5.1"
version = "0.7.13"
edition = "2021"
authors = ["Greg Heartsfield <scsibug@imap.cc>"]
description = "A relay implementation for the Nostr protocol"
readme = "README.md"
homepage = "https://sr.ht/~gheartsfield/nostr-rs-relay/"
repository = "https://git.sr.ht/~gheartsfield/nostr-rs-relay"
license = "MIT"
keywords = ["nostr", "server"]
categories = ["network-programming", "web-programming"]
[dependencies]
log = "^0.4"
env_logger = "^0.9"
tokio = { version = "^1.16", features = ["full"] }
futures = "^0.3"
futures-util = "^0.3"
tokio-tungstenite = "^0.16"
tungstenite = "^0.16"
thiserror = "^1"
uuid = { version = "^0.8", features = ["v4"] }
config = { version = "0.11", features = ["toml"] }
bitcoin_hashes = { version = "^0.9", features = ["serde"] }
secp256k1 = {version = "^0.21", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] }
serde = { version = "^1.0", features = ["derive"] }
serde_json = {version = "^1.0", features = ["preserve_order"]}
hex = "^0.4"
rusqlite = { version = "^0.26", features = ["limits"]}
r2d2 = "^0.8"
r2d2_sqlite = "^0.19"
lazy_static = "^1.4"
governor = "^0.4"
nonzero_ext = "^0.3"
tracing = "0.1.36"
tracing-subscriber = "0.2.0"
tokio = { version = "1", features = ["full", "tracing", "signal"] }
console-subscriber = "0.1.8"
futures = "0.3"
futures-util = "0.3"
tokio-tungstenite = "0.17"
tungstenite = "0.17"
thiserror = "1"
uuid = { version = "1.1.2", features = ["v4"] }
config = { version = "0.12", features = ["toml"] }
bitcoin_hashes = { version = "0.10", features = ["serde"] }
secp256k1 = {version = "0.21", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = {version = "1.0", features = ["preserve_order"]}
hex = "0.4"
rusqlite = { version = "0.26", features = ["limits","bundled"]}
r2d2 = "0.8"
r2d2_sqlite = "0.19"
lazy_static = "1.4"
governor = "0.4"
nonzero_ext = "0.3"
hyper = { version="0.14", features=["client", "server","http1","http2","tcp"] }
hyper-tls = "^0.5"
http = { version = "^0.2" }
parse_duration = "^2"
rand = "^0.8"
hyper-tls = "0.5"
http = { version = "0.2" }
parse_duration = "2"
rand = "0.8"
const_format = "0.2.28"
regex = "1"
[dev-dependencies]
anyhow = "1"

Dockerfile

@@ -1,18 +1,24 @@
FROM rust:1.58.1 as builder
FROM docker.io/library/rust:1.66.0@sha256:359949280cebefe93ccb33089fe25111a3aadfe99eac4b6cbe8ec3e1b571dacb as builder
RUN USER=root cargo install cargo-auditable
RUN USER=root cargo new --bin nostr-rs-relay
WORKDIR ./nostr-rs-relay
COPY ./Cargo.toml ./Cargo.toml
COPY ./Cargo.lock ./Cargo.lock
RUN cargo build --release
# build dependencies only (caching)
RUN cargo auditable build --release --locked
# get rid of starter project code
RUN rm src/*.rs
# copy project source code
COPY ./src ./src
# build auditable release using locked deps
RUN rm ./target/release/deps/nostr*relay*
RUN cargo build --release
RUN cargo auditable build --release --locked
FROM docker.io/library/debian:bullseye-20221205-slim@sha256:25f10b4f1ded5341a3ca0a30290ff3cd5639415f0c5a2222d5e7d5dd72952aa1
FROM debian:bullseye-20220125-slim
ARG APP=/usr/src/app
ARG APP_DATA=/usr/src/app/db
RUN apt-get update \
@@ -36,7 +42,7 @@ RUN chown -R $APP_USER:$APP_USER ${APP}
USER $APP_USER
WORKDIR ${APP}
ENV RUST_LOG=info
ENV RUST_LOG=info,nostr_rs_relay=info
ENV APP_DATA=${APP_DATA}
CMD ./nostr-rs-relay --db ${APP_DATA}

README.md

@@ -1,26 +1,34 @@
# [nostr-rs-relay](https://git.sr.ht/~gheartsfield/nostr-rs-relay)
This is a [nostr](https://github.com/fiatjaf/nostr) relay, written in
Rust. It currently supports the entire relay protocol, and has a
SQLite persistence layer.
This is a [nostr](https://github.com/nostr-protocol/nostr) relay,
written in Rust. It currently supports the entire relay protocol, and
persists data with SQLite.
The project master repository is available on
[sourcehut](https://sr.ht/~gheartsfield/nostr-rs-relay/), and is
mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay).
[![builds.sr.ht status](https://builds.sr.ht/~gheartsfield/nostr-rs-relay/commits/master.svg)](https://builds.sr.ht/~gheartsfield/nostr-rs-relay/commits/master?)
## Features
NIPs with a relay-specific implementation are listed here.
[NIPs](https://github.com/nostr-protocol/nips) with a relay-specific implementation are listed here.
- [x] NIP-01: Core event model
- [x] NIP-01: Hide old metadata events
- [x] NIP-01: Id/Author prefix search (_experimental_)
- [x] NIP-02: Hide old contact list events
- [ ] NIP-03: OpenTimestamps
- [x] NIP-05: Mapping Nostr keys to DNS identifiers
- [ ] NIP-09: Event deletion
- [x] NIP-11: Relay information document
- [x] NIP-12: Generic tag search (_experimental_)
- [x] NIP-01: [Basic protocol flow description](https://github.com/nostr-protocol/nips/blob/master/01.md)
* Core event model
* Hide old metadata events
* Id/Author prefix search
- [x] NIP-02: [Contact List and Petnames](https://github.com/nostr-protocol/nips/blob/master/02.md)
- [ ] NIP-03: [OpenTimestamps Attestations for Events](https://github.com/nostr-protocol/nips/blob/master/03.md)
- [x] NIP-05: [Mapping Nostr keys to DNS-based internet identifiers](https://github.com/nostr-protocol/nips/blob/master/05.md)
- [x] NIP-09: [Event Deletion](https://github.com/nostr-protocol/nips/blob/master/09.md)
- [x] NIP-11: [Relay Information Document](https://github.com/nostr-protocol/nips/blob/master/11.md)
- [x] NIP-12: [Generic Tag Queries](https://github.com/nostr-protocol/nips/blob/master/12.md)
- [x] NIP-15: [End of Stored Events Notice](https://github.com/nostr-protocol/nips/blob/master/15.md)
- [x] NIP-16: [Event Treatment](https://github.com/nostr-protocol/nips/blob/master/16.md)
- [x] NIP-20: [Command Results](https://github.com/nostr-protocol/nips/blob/master/20.md)
- [x] NIP-22: [Event `created_at` limits](https://github.com/nostr-protocol/nips/blob/master/22.md) (_future-dated events only_)
- [x] NIP-26: [Event Delegation](https://github.com/nostr-protocol/nips/blob/master/26.md)
## Quick Start
@@ -29,15 +37,32 @@ application. Use a bind mount to store the SQLite database outside of
the container image, and map the container's 8080 port to a host port
(7000 in the example below).
The examples below start a rootless podman container, mapping a local
data directory and config file.
```console
$ docker build -t nostr-rs-relay .
$ podman build -t nostr-rs-relay .
$ docker run -it -p 7000:8080 \
--mount src=$(pwd)/data,target=/usr/src/app/db,type=bind nostr-rs-relay
$ mkdir data
[2021-12-31T19:58:31Z INFO nostr_rs_relay] listening on: 0.0.0.0:8080
[2021-12-31T19:58:31Z INFO nostr_rs_relay::db] opened database "/usr/src/app/db/nostr.db" for writing
[2021-12-31T19:58:31Z INFO nostr_rs_relay::db] DB version = 2
$ podman unshare chown 100:100 data
$ podman run -it --rm -p 7000:8080 \
--user=100:100 \
-v $(pwd)/data:/usr/src/app/db:Z \
-v $(pwd)/config.toml:/usr/src/app/config.toml:ro,Z \
--name nostr-relay nostr-rs-relay:latest
Nov 19 15:31:15.013 INFO nostr_rs_relay: Starting up from main
Nov 19 15:31:15.017 INFO nostr_rs_relay::server: listening on: 0.0.0.0:8080
Nov 19 15:31:15.019 INFO nostr_rs_relay::server: db writer created
Nov 19 15:31:15.019 INFO nostr_rs_relay::server: control message listener started
Nov 19 15:31:15.019 INFO nostr_rs_relay::db: Built a connection pool "event writer" (min=1, max=4)
Nov 19 15:31:15.019 INFO nostr_rs_relay::db: opened database "/usr/src/app/db/nostr.db" for writing
Nov 19 15:31:15.019 INFO nostr_rs_relay::schema: DB version = 0
Nov 19 15:31:15.054 INFO nostr_rs_relay::schema: database pragma/schema initialized to v7, and ready
Nov 19 15:31:15.054 INFO nostr_rs_relay::schema: All migration scripts completed successfully. Welcome to v7.
Nov 19 15:31:15.521 INFO nostr_rs_relay::db: Built a connection pool "client query" (min=4, max=128)
```
Use a `nostr` client such as
@@ -79,8 +104,13 @@ termination, load balancing, and other features), see [Reverse
Proxy](reverse-proxy.md).
## Dev Channel
The current dev discussions for this project is happening at https://discord.gg/ufG6fH52Vk.
Drop in to query any development related questions.
For development discussions, please feel free to use the [sourcehut
mailing list](https://lists.sr.ht/~gheartsfield/nostr-rs-relay-devel).
Or, drop by the [Nostr Telegram Channel](https://t.me/nostr_protocol).
To chat about `nostr-rs-relay` on `nostr` itself; visit our channel on [anigma](https://anigma.io/) or another client that supports [NIP-28](https://github.com/nostr-protocol/nips/blob/master/28.md) chats:
* `2ad246a094fee48c6e455dd13d759d5f41b5a233120f5719d81ebc1935075194`
License
---

config.toml

@@ -16,12 +16,21 @@ description = "A newly created nostr-rs-relay.\n\nCustomize this with your own i
# Administrative contact URI
#contact = "mailto:contact@example.com"
[diagnostics]
# Enable tokio tracing (for use with tokio-console)
#tracing = true
[database]
# Directory for SQLite files. Defaults to the current directory. Can
# also be specified (and overriden) with the "--db dirname" command
# line option.
data_directory = "."
# Use an in-memory database instead of 'nostr.db'.
# Caution; this will not survive a process restart!
#in_memory = false
# Database connection pool settings for subscribers:
# Minimum number of SQLite reader connections
@@ -37,17 +46,41 @@ address = "0.0.0.0"
# Listen on this port
port = 8080
# If present, read this HTTP header for logging client IP addresses.
# Examples for common proxies, cloudflare:
#remote_ip_header = "x-forwarded-for"
#remote_ip_header = "cf-connecting-ip"
# Websocket ping interval in seconds, defaults to 5 minutes
#ping_interval = 300
[options]
# Reject events that have timestamps greater than this many seconds in
# the future. Defaults to rejecting anything greater than 30 minutes
# from the current time.
# the future. Recommended to reject anything greater than 30 minutes
# from the current time, but the default is to allow any date.
reject_future_seconds = 1800
[limits]
# Limit events created per second, averaged over one minute. Must be
# an integer. If not set (or set to 0), defaults to unlimited.
# an integer. If not set (or set to 0), defaults to unlimited. Note:
# this is for the server as a whole, not per-connection.
# messages_per_sec = 0
# Limit client subscriptions created per second, averaged over one
# minute. Must be an integer. If not set (or set to 0), defaults to
# unlimited.
#subscriptions_per_min = 0
# UNIMPLEMENTED...
# Limit how many concurrent database connections a client can have.
# This prevents a single client from starting too many expensive
# database queries. Must be an integer. If not set (or set to 0),
# defaults to unlimited (subject to subscription limits).
#db_conns_per_client = 0
# Limit blocking threads used for database connections. Defaults to 16.
#max_blocking_threads = 16
# Limit the maximum size of an EVENT message. Defaults to 128 KB.
# Set to 0 for unlimited.
#max_event_bytes = 131072
@@ -63,8 +96,8 @@ reject_future_seconds = 1800
#broadcast_buffer = 16384
# Event persistence buffer size, in number of events. This provides
# backpressure to senders if writes are slow. Defaults to 16.
#event_persist_buffer = 16
# backpressure to senders if writes are slow.
#event_persist_buffer = 4096
[authorization]
# Pubkey addresses in this array are whitelisted for event publishing.

(new shell script)

@@ -0,0 +1,3 @@
#!/usr/bin/env bash
sed -E 's/@sha256:[[:alnum:]]+//g' Dockerfile > Dockerfile.any-platform
echo "Created platform-agnostic Dockerfile in 'Dockerfile.any-platform'"

reverse-proxy.md

@@ -1,8 +1,8 @@
# Reverse Proxy Setup Guide
It is recommended to run `nostr-rs-relay` behind a reverse proxy such
as `haproxy` or `nginx` to provide TLS termination. A simple example
of an `haproxy` configuration is documented here.
as `haproxy` or `nginx` to provide TLS termination. Simple examples
of `haproxy` and `nginx` configurations are documented here.
## Minimal HAProxy Configuration
@@ -46,8 +46,47 @@ backend relay
server relay 127.0.0.1:8080
```
### Notes
### HAProxy Notes
You may experience WebSocket connection problems with Firefox if
HTTP/2 is enabled, for older versions of HAProxy (2.3.x). Either
disable HTTP/2 (`h2`), or upgrade HAProxy.
## Bare-bones Nginx Configuration
Assumptions:
* `Nginx` version is `1.18.0` (other versions not tested).
* Hostname for the relay is `relay.example.com`.
* SSL certificate and key are located at `/etc/letsencrypt/live/relay.example.com/`.
* Relay is running on port `8080`.
```
http {
server {
listen 443 ssl;
server_name relay.example.com;
ssl_certificate /etc/letsencrypt/live/relay.example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/relay.example.com/privkey.pem;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers HIGH:!aNULL:!MD5;
keepalive_timeout 70;
location / {
proxy_pass http://localhost:8080;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Host $host;
}
}
}
```
### Nginx Notes
The above configuration was tested with `nginx` `1.18.0` on `Ubuntu 20.04`.
For help installing `nginx` on `Ubuntu`, see [this guide](https://www.digitalocean.com/community/tutorials/how-to-install-nginx-on-ubuntu-20-04).
For guidance on using `letsencrypt` to obtain a cert on `Ubuntu`, including an `nginx` plugin, see [this post](https://www.digitalocean.com/community/tutorials/how-to-secure-nginx-with-let-s-encrypt-on-ubuntu-20-04).

rustfmt.toml

@@ -1 +1,4 @@
edition = "2018"
edition = "2021"
#max_width = 140
#chain_width = 100
#fn_call_width = 100

src/close.rs

@@ -5,7 +5,7 @@ use crate::error::{Error, Result};
use serde::{Deserialize, Serialize};
/// Close command in network format
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct CloseCmd {
/// Protocol command, expected to always be "CLOSE".
cmd: String,
@@ -14,7 +14,7 @@ pub struct CloseCmd {
}
/// Identifier of the subscription to be closed.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct Close {
/// The subscription identifier being closed.
pub id: String,
@@ -23,10 +23,10 @@ pub struct Close {
impl From<CloseCmd> for Result<Close> {
fn from(cc: CloseCmd) -> Result<Close> {
// ensure command is correct
if cc.cmd != "CLOSE" {
Err(Error::CommandUnknownError)
} else {
if cc.cmd == "CLOSE" {
Ok(Close { id: cc.id })
} else {
Err(Error::CommandUnknownError)
}
}
}

src/config.rs

@@ -1,14 +1,8 @@
//! Configuration file and settings management
use lazy_static::lazy_static;
use log::*;
use config::{Config, ConfigError, File};
use serde::{Deserialize, Serialize};
use std::sync::RwLock;
use std::time::Duration;
// initialize a singleton default configuration
lazy_static! {
pub static ref SETTINGS: RwLock<Settings> = RwLock::new(Settings::default());
}
use tracing::warn;
#[derive(Debug, Serialize, Deserialize, Clone)]
#[allow(unused)]
@@ -20,29 +14,31 @@ pub struct Info {
pub contact: Option<String>,
}
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Database {
pub data_directory: String,
pub in_memory: bool,
pub min_conn: u32,
pub max_conn: u32,
}
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Network {
pub port: u16,
pub address: String,
pub remote_ip_header: Option<String>, // retrieve client IP from this HTTP header if present
pub ping_interval_seconds: u32,
}
//
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Options {
pub reject_future_seconds: Option<usize>, // if defined, reject any events with a timestamp more than X seconds in the future
}
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Retention {
// TODO: implement
@@ -52,10 +48,13 @@ pub struct Retention {
pub whitelist_addresses: Option<Vec<String>>, // whitelisted addresses (never delete)
}
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Limits {
pub messages_per_sec: Option<u32>, // Artificially slow down event writing to limit disk consumption (averaged over 1 minute)
pub subscriptions_per_min: Option<u32>, // Artificially slow down request (db query) creation to prevent abuse (averaged over 1 minute)
pub db_conns_per_client: Option<u32>, // How many concurrent database queries (not subscriptions) may a client have?
pub max_blocking_threads: usize,
pub max_event_bytes: Option<usize>, // Maximum size of an EVENT message
pub max_ws_message_bytes: Option<usize>,
pub max_ws_frame_bytes: Option<usize>,
@@ -63,13 +62,19 @@ pub struct Limits {
pub event_persist_buffer: usize, // events to buffer for database commits (block senders if database writes are too slow)
}
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Authorization {
pub pubkey_whitelist: Option<Vec<String>>, // If present, only allow these pubkeys to publish events
}
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Diagnostics {
pub tracing: bool, // enables tokio console-subscriber
}
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Copy)]
#[serde(rename_all = "lowercase")]
pub enum VerifiedUsersMode {
Enabled,
@@ -77,7 +82,7 @@ pub enum VerifiedUsersMode {
Disabled,
}
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct VerifiedUsers {
pub mode: VerifiedUsersMode, // Mode of operation: "enabled" (enforce) or "passive" (check only). If none, this is simply disabled.
@@ -96,37 +101,46 @@ impl VerifiedUsers {
self.verify_update_frequency_duration = self.verify_update_duration();
}
#[must_use]
pub fn is_enabled(&self) -> bool {
self.mode == VerifiedUsersMode::Enabled
}
#[must_use]
pub fn is_active(&self) -> bool {
self.mode == VerifiedUsersMode::Enabled || self.mode == VerifiedUsersMode::Passive
}
#[must_use]
pub fn is_passive(&self) -> bool {
self.mode == VerifiedUsersMode::Passive
}
#[must_use]
pub fn verify_expiration_duration(&self) -> Option<Duration> {
self.verify_expiration
.as_ref()
.and_then(|x| parse_duration::parse(x).ok())
}
#[must_use]
pub fn verify_update_duration(&self) -> Option<Duration> {
self.verify_update_frequency
.as_ref()
.and_then(|x| parse_duration::parse(x).ok())
}
#[must_use]
pub fn is_valid(&self) -> bool {
self.verify_expiration_duration().is_some() && self.verify_update_duration().is_some()
}
}
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Settings {
pub info: Info,
pub diagnostics: Diagnostics,
pub database: Database,
pub network: Network,
pub limits: Limits,
@@ -137,39 +151,41 @@ pub struct Settings {
}
impl Settings {
#[must_use]
pub fn new() -> Self {
let d = Self::default();
let default_settings = Self::default();
// attempt to construct settings with file
// Self::new_from_default(&d).unwrap_or(d)
let from_file = Self::new_from_default(&d);
let from_file = Self::new_from_default(&default_settings);
match from_file {
Ok(f) => f,
Err(e) => {
warn!("Error reading config file ({:?})", e);
d
default_settings
}
}
}
fn new_from_default(default: &Settings) -> Result<Self, config::ConfigError> {
let config: config::Config = config::Config::new();
let mut settings: Settings = config
fn new_from_default(default: &Settings) -> Result<Self, ConfigError> {
let builder = Config::builder();
let config: Config = builder
// use defaults
.with_merged(config::Config::try_from(default).unwrap())?
.add_source(Config::try_from(default)?)
// override with file contents
.with_merged(config::File::with_name("config"))?
.try_into()?;
.add_source(File::with_name("config.toml"))
.build()?;
let mut settings: Settings = config.try_deserialize()?;
// ensure connection pool size is logical
if settings.database.min_conn > settings.database.max_conn {
panic!(
assert!(
settings.database.min_conn <= settings.database.max_conn,
"Database min_conn setting ({}) cannot exceed max_conn ({})",
settings.database.min_conn, settings.database.max_conn
settings.database.min_conn,
settings.database.max_conn
);
}
// ensure durations parse
if !settings.verified_users.is_valid() {
panic!("VerifiedUsers time settings could not be parsed");
}
assert!(
settings.verified_users.is_valid(),
"VerifiedUsers time settings could not be parsed"
);
// initialize durations for verified users
settings.verified_users.init();
Ok(settings)
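The migration above targets the config crate's 0.13-era builder API. Stripped to its essentials, the layering pattern is defaults first, file second (a minimal sketch with a hypothetical SimpleSettings type; .required(false) is an optional variation not used above):

use config::{Config, ConfigError, File};
use serde::{Deserialize, Serialize};

#[derive(Debug, Default, Serialize, Deserialize)]
struct SimpleSettings {
    data_directory: String,
}

// Compiled-in defaults are the base layer; config.toml overrides them if present.
fn load_settings() -> Result<SimpleSettings, ConfigError> {
    Config::builder()
        .add_source(Config::try_from(&SimpleSettings::default())?)
        .add_source(File::with_name("config.toml").required(false))
        .build()?
        .try_deserialize()
}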
@@ -186,17 +202,24 @@ impl Default for Settings {
pubkey: None,
contact: None,
},
diagnostics: Diagnostics { tracing: false },
database: Database {
data_directory: ".".to_owned(),
in_memory: false,
min_conn: 4,
max_conn: 128,
},
network: Network {
port: 8080,
ping_interval_seconds: 300,
address: "0.0.0.0".to_owned(),
remote_ip_header: None,
},
limits: Limits {
messages_per_sec: None,
subscriptions_per_min: None,
db_conns_per_client: None,
max_blocking_threads: 16,
max_event_bytes: Some(2 << 17), // 128K
max_ws_message_bytes: Some(2 << 17), // 128K
max_ws_frame_bytes: Some(2 << 17), // 128K
@@ -223,7 +246,7 @@ impl Default for Settings {
whitelist_addresses: None, // whitelisted addresses (never delete)
},
options: Options {
reject_future_seconds: Some(30 * 60), // Reject events 30min in the future or greater
reject_future_seconds: None, // Reject events in the future if defined
},
}
}

src/conn.rs

@@ -2,11 +2,10 @@
use crate::close::Close;
use crate::error::Error;
use crate::error::Result;
use crate::event::Event;
use crate::subscription::Subscription;
use log::*;
use std::collections::HashMap;
use tracing::{debug, trace};
use uuid::Uuid;
/// A subscription identifier has a maximum length
@@ -14,6 +13,8 @@ const MAX_SUBSCRIPTION_ID_LEN: usize = 256;
/// State for a client connection
pub struct ClientConn {
/// Client IP (either from the socket, or a configured proxy header)
client_ip: String,
/// Unique client identifier generated at connection time
client_id: Uuid,
/// The current set of active client subscriptions
@@ -24,46 +25,56 @@ pub struct ClientConn {
impl Default for ClientConn {
fn default() -> Self {
Self::new()
Self::new("unknown".to_owned())
}
}
impl ClientConn {
/// Create a new, empty connection state.
pub fn new() -> Self {
#[must_use]
pub fn new(client_ip: String) -> Self {
let client_id = Uuid::new_v4();
ClientConn {
client_ip,
client_id,
subscriptions: HashMap::new(),
max_subs: 32,
}
}
pub fn subscriptions(&self) -> &HashMap<String, Subscription> {
&self.subscriptions
}
/// Check if the given subscription already exists
pub fn has_subscription(&self, sub: &Subscription) -> bool {
self.subscriptions.values().any(|x| x == sub)
}
/// Get a short prefix of the client's unique identifier, suitable
/// for logging.
#[must_use]
pub fn get_client_prefix(&self) -> String {
self.client_id.to_string().chars().take(8).collect()
}
/// Find all matching subscriptions.
pub fn get_matching_subscriptions(&self, e: &Event) -> Vec<&str> {
let mut v: Vec<&str> = vec![];
for (id, sub) in self.subscriptions.iter() {
if sub.interested_in_event(e) {
v.push(id);
}
}
v
#[must_use]
pub fn ip(&self) -> &str {
&self.client_ip
}
/// Add a new subscription for this connection.
/// # Errors
///
/// Will return `Err` if the client has too many subscriptions, or
/// if the provided name is excessively long.
pub fn subscribe(&mut self, s: Subscription) -> Result<()> {
let k = s.get_id();
let sub_id_len = k.len();
// prevent arbitrarily long subscription identifiers from
// being used.
if sub_id_len > MAX_SUBSCRIPTION_ID_LEN {
info!(
debug!(
"ignoring sub request with excessive length: ({})",
sub_id_len
);
@@ -72,8 +83,12 @@ impl ClientConn {
// check if an existing subscription exists, and replace if so
if self.subscriptions.contains_key(&k) {
self.subscriptions.remove(&k);
self.subscriptions.insert(k, s);
debug!("replaced existing subscription");
self.subscriptions.insert(k, s.clone());
trace!(
"replaced existing subscription (cid: {}, sub: {:?})",
self.get_client_prefix(),
s.get_id()
);
return Ok(());
}
@@ -83,20 +98,22 @@ impl ClientConn {
}
// add subscription
self.subscriptions.insert(k, s);
debug!(
"registered new subscription, currently have {} active subs",
self.subscriptions.len()
trace!(
"registered new subscription, currently have {} active subs (cid: {})",
self.subscriptions.len(),
self.get_client_prefix(),
);
Ok(())
}
/// Remove the subscription for this connection.
pub fn unsubscribe(&mut self, c: Close) {
pub fn unsubscribe(&mut self, c: &Close) {
// TODO: return notice if subscription did not exist.
self.subscriptions.remove(&c.id);
debug!(
"removed subscription, currently have {} active subs",
self.subscriptions.len()
trace!(
"removed subscription, currently have {} active subs (cid: {})",
self.subscriptions.len(),
self.get_client_prefix(),
);
}
}
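Taken together, a connection handler now threads the resolved client IP through the constructor. An illustrative use (this assumes Subscription deserializes from the NIP-01 ["REQ", ...] array, as elsewhere in this crate):

fn demo_subscribe() {
    let mut conn = ClientConn::new("203.0.113.7".to_owned());
    // hypothetical REQ payload, for illustration only
    let sub: Subscription =
        serde_json::from_str(r#"["REQ","example-sub",{"kinds":[1]}]"#).unwrap();
    conn.subscribe(sub).unwrap();
    assert_eq!(conn.ip(), "203.0.113.7");
    assert_eq!(conn.subscriptions().len(), 1);
}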

src/db.rs

@@ -1,29 +1,31 @@
//! Event persistence and querying
use crate::config::SETTINGS;
use crate::error::Error;
use crate::error::Result;
use crate::event::Event;
//use crate::config::SETTINGS;
use crate::config::Settings;
use crate::error::{Error, Result};
use crate::event::{single_char_tagname, Event};
use crate::hexrange::hex_range;
use crate::hexrange::HexSearch;
use crate::nip05;
use crate::notice::Notice;
use crate::schema::{upgrade_db, STARTUP_SQL};
use crate::subscription::ReqFilter;
use crate::subscription::Subscription;
use crate::utils::is_hex;
use crate::utils::{is_hex, is_lower_hex};
use governor::clock::Clock;
use governor::{Quota, RateLimiter};
use hex;
use log::*;
use r2d2;
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::params;
use rusqlite::types::ToSql;
use rusqlite::Connection;
use rusqlite::OpenFlags;
use std::fmt::Write as _;
use std::path::Path;
use std::thread;
use std::time::Duration;
use std::time::Instant;
use tokio::task;
use tracing::{debug, info, trace, warn};
pub type SqlitePool = r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>;
pub type PooledConnection = r2d2::PooledConnection<r2d2_sqlite::SqliteConnectionManager>;
@@ -31,38 +33,58 @@ pub type PooledConnection = r2d2::PooledConnection<r2d2_sqlite::SqliteConnection
/// Events submitted from a client, with a return channel for notices
pub struct SubmittedEvent {
pub event: Event,
pub notice_tx: tokio::sync::mpsc::Sender<String>,
pub notice_tx: tokio::sync::mpsc::Sender<Notice>,
}
/// Database file
pub const DB_FILE: &str = "nostr.db";
/// How frequently to run maintenance
/// How frequently (in seconds) to run database maintenance.
pub const EVENT_MAINTENANCE_FREQ_SEC: u64 = 60;
/// How many persisted events before we pause for backups.
/// It isn't clear this is enough to make the online backup API work yet.
pub const EVENT_COUNT_BACKUP_PAUSE_TRIGGER: usize = 1000;
/// Build a database connection pool.
/// # Panics
///
/// Will panic if the pool could not be created.
#[must_use]
pub fn build_pool(
name: &str,
settings: &Settings,
flags: OpenFlags,
min_size: u32,
max_size: u32,
wait_for_db: bool,
) -> SqlitePool {
let settings = SETTINGS.read().unwrap();
let db_dir = &settings.database.data_directory;
let full_path = Path::new(db_dir).join(DB_FILE);
// small hack; if the database doesn't exist yet, that means the
// writer thread hasn't finished. Give it a chance to work. This
// is only an issue the first time we run.
if !settings.database.in_memory {
while !full_path.exists() && wait_for_db {
debug!("Database reader pool is waiting on the database to be created...");
thread::sleep(Duration::from_millis(500));
}
let manager = SqliteConnectionManager::file(&full_path)
}
let manager = if settings.database.in_memory {
SqliteConnectionManager::memory()
.with_flags(flags)
.with_init(|c| c.execute_batch(STARTUP_SQL));
.with_init(|c| c.execute_batch(STARTUP_SQL))
} else {
SqliteConnectionManager::file(&full_path)
.with_flags(flags)
.with_init(|c| c.execute_batch(STARTUP_SQL))
};
let pool: SqlitePool = r2d2::Pool::builder()
.test_on_check_out(true) // no noticeable performance hit
.min_idle(Some(min_size))
.max_size(max_size)
.max_lifetime(Some(Duration::from_secs(30)))
.build(manager)
.unwrap();
info!(
@@ -72,43 +94,76 @@ pub fn build_pool(
pool
}
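Callers now pass Settings explicitly instead of reading a global. A plausible read-side invocation, mirroring the writer pool below (flags and sizes illustrative):

use rusqlite::OpenFlags;

// Hypothetical read-only pool for client queries; `true` makes the pool wait
// for the writer to create the database file on first startup.
fn build_read_pool(settings: &Settings) -> SqlitePool {
    build_pool(
        "client query",
        settings,
        OpenFlags::SQLITE_OPEN_READ_ONLY,
        settings.database.min_conn,
        settings.database.max_conn,
        true,
    )
}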
/// Build a single database connection, with provided flags
pub fn build_conn(flags: OpenFlags) -> Result<Connection> {
let settings = SETTINGS.read().unwrap();
let db_dir = &settings.database.data_directory;
let full_path = Path::new(db_dir).join(DB_FILE);
// create a connection
Ok(Connection::open_with_flags(&full_path, flags)?)
/// Perform normal maintenance
pub fn optimize_db(conn: &mut PooledConnection) -> Result<()> {
let start = Instant::now();
conn.execute_batch("PRAGMA optimize;")?;
info!("optimize ran in {:?}", start.elapsed());
Ok(())
}
#[derive(Debug)]
enum SqliteReturnStatus {
SqliteOk,
SqliteBusy,
SqliteError,
SqliteOther(u64),
}
/// Checkpoint/Truncate WAL
pub fn checkpoint_db(conn: &mut PooledConnection) -> Result<()> {
let query = "PRAGMA wal_checkpoint(TRUNCATE);";
let start = Instant::now();
let (cp_result, wal_size, _frames_checkpointed) = conn.query_row(query, [], |row| {
let checkpoint_result: u64 = row.get(0)?;
let wal_size: u64 = row.get(1)?;
let frames_checkpointed: u64 = row.get(2)?;
Ok((checkpoint_result, wal_size, frames_checkpointed))
})?;
let result = match cp_result {
0 => SqliteReturnStatus::SqliteOk,
1 => SqliteReturnStatus::SqliteBusy,
2 => SqliteReturnStatus::SqliteError,
x => SqliteReturnStatus::SqliteOther(x),
};
info!(
"checkpoint ran in {:?} (result: {:?}, WAL size: {})",
start.elapsed(),
result,
wal_size
);
Ok(())
}
/// Spawn a database writer that persists events to the SQLite store.
pub async fn db_writer(
settings: Settings,
mut event_rx: tokio::sync::mpsc::Receiver<SubmittedEvent>,
bcast_tx: tokio::sync::broadcast::Sender<Event>,
metadata_tx: tokio::sync::broadcast::Sender<Event>,
mut shutdown: tokio::sync::broadcast::Receiver<()>,
) -> tokio::task::JoinHandle<Result<()>> {
let settings = SETTINGS.read().unwrap();
// are we performing NIP-05 checking?
let nip05_active = settings.verified_users.is_active();
// are we requiring NIP-05 user verification?
let nip05_enabled = settings.verified_users.is_enabled();
task::spawn_blocking(move || {
// get database configuration settings
let settings = SETTINGS.read().unwrap();
let db_dir = &settings.database.data_directory;
let full_path = Path::new(db_dir).join(DB_FILE);
// create a connection pool
let pool = build_pool(
"event writer",
&settings,
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
1,
4,
2,
false,
);
if settings.database.in_memory {
info!("using in-memory database, this will not persist a restart!");
} else {
info!("opened database {:?} for writing", full_path);
}
upgrade_db(&mut pool.get()?)?;
// Make a copy of the whitelist
@@ -118,6 +173,10 @@ pub async fn db_writer(
let rps_setting = settings.limits.messages_per_sec;
let mut most_recent_rate_limit = Instant::now();
let mut lim_opt = None;
// Constant writing has interfered with online backups. Keep
// track of how long since we've given the backups a chance to
// run.
let mut backup_pause_counter: usize = 0;
let clock = governor::clock::QuantaClock::default();
if let Some(rps) = rps_setting {
if rps > 0 {
@@ -137,12 +196,15 @@ pub async fn db_writer(
if next_event.is_none() {
break;
}
// track if an event write occurred; this is used to
// update the rate limiter
let mut event_write = false;
let subm_event = next_event.unwrap();
let event = subm_event.event;
let notice_tx = subm_event.notice_tx;
// check if this event is authorized.
if let Some(allowed_addrs) = whitelist {
// TODO: incorporate delegated pubkeys
// if the event address is not in allowed_addrs.
if !allowed_addrs.contains(&event.pubkey) {
info!(
@@ -150,7 +212,10 @@ pub async fn db_writer(
event.get_event_id_prefix()
);
notice_tx
.try_send("pubkey is not allowed to publish to this relay".to_owned())
.try_send(Notice::blocked(
event.id,
"pubkey is not allowed to publish to this relay",
))
.ok();
continue;
}
@@ -169,22 +234,23 @@ pub async fn db_writer(
if nip05_enabled {
match nip05::query_latest_user_verification(pool.get()?, event.pubkey.to_owned()) {
Ok(uv) => {
if uv.is_valid() {
if uv.is_valid(&settings.verified_users) {
info!(
"new event from verified author ({:?},{:?})",
uv.name.to_string(),
event.get_author_prefix()
);
} else {
info!("rejecting event, author ({:?} / {:?}) verification invalid (expired/wrong domain)",
info!(
"rejecting event, author ({:?} / {:?}) verification invalid (expired/wrong domain)",
uv.name.to_string(),
event.get_author_prefix()
);
notice_tx
.try_send(
"NIP-05 verification is no longer valid (expired/wrong domain)"
.to_owned(),
)
.try_send(Notice::blocked(
event.id,
"NIP-05 verification is no longer valid (expired/wrong domain)",
))
.ok();
continue;
}
@@ -195,7 +261,10 @@ pub async fn db_writer(
event.get_author_prefix()
);
notice_tx
.try_send("NIP-05 verification needed to publish events".to_owned())
.try_send(Notice::blocked(
event.id,
"NIP-05 verification needed to publish events",
))
.ok();
continue;
}
@@ -207,13 +276,25 @@ pub async fn db_writer(
}
// TODO: cache recent list of authors to remove a DB call.
let start = Instant::now();
if event.kind >= 20000 && event.kind < 30000 {
bcast_tx.send(event.clone()).ok();
info!(
"published ephemeral event: {:?} from: {:?} in: {:?}",
event.get_event_id_prefix(),
event.get_author_prefix(),
start.elapsed()
);
event_write = true
} else {
log_pool_stats("writer", &pool);
match write_event(&mut pool.get()?, &event) {
Ok(updated) => {
if updated == 0 {
trace!("ignoring duplicate event");
trace!("ignoring duplicate or deleted event");
notice_tx.try_send(Notice::duplicate(event.id)).ok();
} else {
info!(
"persisted event {:?} from {:?} in {:?}",
"persisted event: {:?} from: {:?} in: {:?}",
event.get_event_id_prefix(),
event.get_author_prefix(),
start.elapsed()
@@ -221,16 +302,20 @@ pub async fn db_writer(
event_write = true;
// send this out to all clients
bcast_tx.send(event.clone()).ok();
notice_tx.try_send(Notice::saved(event.id)).ok();
}
}
Err(err) => {
warn!("event insert failed: {:?}", err);
notice_tx
.try_send(
"relay experienced an error trying to publish the latest event"
.to_owned(),
)
.ok();
let msg = "relay experienced an error trying to publish the latest event";
notice_tx.try_send(Notice::error(event.id, msg)).ok();
}
}
backup_pause_counter += 1;
if backup_pause_counter > EVENT_COUNT_BACKUP_PAUSE_TRIGGER {
info!("pausing db write thread for a moment...");
thread::sleep(Duration::from_millis(500));
backup_pause_counter = 0
}
}
@@ -268,16 +353,18 @@ pub fn write_event(conn: &mut PooledConnection, e: &Event) -> Result<usize> {
let tx = conn.transaction()?;
// get relevant fields from event and convert to blobs.
let id_blob = hex::decode(&e.id).ok();
let pubkey_blob = hex::decode(&e.pubkey).ok();
let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok();
let delegator_blob: Option<Vec<u8>> = e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
let event_str = serde_json::to_string(&e).ok();
// ignore if the event hash is a duplicate.
let ins_count = tx.execute(
"INSERT OR IGNORE INTO event (event_hash, created_at, kind, author, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, strftime('%s','now'), FALSE);",
params![id_blob, e.created_at, e.kind, pubkey_blob, event_str]
let mut ins_count = tx.execute(
"INSERT OR IGNORE INTO event (event_hash, created_at, kind, author, delegated_by, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, ?6, strftime('%s','now'), FALSE);",
params![id_blob, e.created_at, e.kind, pubkey_blob, delegator_blob, event_str]
)?;
if ins_count == 0 {
// if the event was a duplicate, no need to insert event or
// pubkey references.
tx.rollback().ok();
return Ok(ins_count);
}
// remember primary key of the event most recently inserted.
@@ -288,11 +375,15 @@ pub fn write_event(conn: &mut PooledConnection, e: &Event) -> Result<usize> {
if tag.len() >= 2 {
let tagname = &tag[0];
let tagval = &tag[1];
// if tagvalue is hex;
if is_hex(tagval) {
// only single-char tags are searchable
let tagchar_opt = single_char_tagname(tagname);
match &tagchar_opt {
Some(_) => {
// if tagvalue is lowercase hex;
if is_lower_hex(tagval) && (tagval.len() % 2 == 0) {
tx.execute(
"INSERT OR IGNORE INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3)",
params![ev_id, &tagname, hex::decode(&tagval).ok()],
params![ev_id, &tagname, hex::decode(tagval).ok()],
)?;
} else {
tx.execute(
@@ -301,35 +392,69 @@ pub fn write_event(conn: &mut PooledConnection, e: &Event) -> Result<usize> {
)?;
}
}
None => {}
}
// if this event is for a metadata update, hide every other kind=0
// event from the same author that was issued earlier than this.
if e.kind == 0 {
}
}
// if this event is replaceable update, hide every other replaceable
// event with the same kind from the same author that was issued
// earlier than this.
if e.kind == 0 || e.kind == 3 || (e.kind >= 10000 && e.kind < 20000) {
let update_count = tx.execute(
"UPDATE event SET hidden=TRUE WHERE id!=? AND kind=0 AND author=? AND created_at <= ? and hidden!=TRUE",
params![ev_id, hex::decode(&e.pubkey).ok(), e.created_at],
"UPDATE event SET hidden=TRUE WHERE id!=? AND kind=? AND author=? AND created_at <= ? and hidden!=TRUE",
params![ev_id, e.kind, hex::decode(&e.pubkey).ok(), e.created_at],
)?;
if update_count > 0 {
info!(
"hid {} older metadata events for author {:?}",
"hid {} older replaceable kind {} events for author: {:?}",
update_count,
e.kind,
e.get_author_prefix()
);
}
}
// if this event is for a contact update, hide every other kind=3
// event from the same author that was issued earlier than this.
if e.kind == 3 {
let update_count = tx.execute(
"UPDATE event SET hidden=TRUE WHERE id!=? AND kind=3 AND author=? AND created_at <= ? and hidden!=TRUE",
params![ev_id, hex::decode(&e.pubkey).ok(), e.created_at],
)?;
if update_count > 0 {
// if this event is a deletion, hide the referenced events from the same author.
if e.kind == 5 {
let event_candidates = e.tag_values_by_name("e");
// first parameter will be author
let mut params: Vec<Box<dyn ToSql>> = vec![Box::new(hex::decode(&e.pubkey)?)];
event_candidates
.iter()
.filter(|x| is_hex(x) && x.len() == 64)
.filter_map(|x| hex::decode(x).ok())
.for_each(|x| params.push(Box::new(x)));
let query = format!(
"UPDATE event SET hidden=TRUE WHERE kind!=5 AND author=? AND event_hash IN ({})",
repeat_vars(params.len() - 1)
);
let mut stmt = tx.prepare(&query)?;
let update_count = stmt.execute(rusqlite::params_from_iter(params))?;
info!(
"hid {} older contact events for author {:?}",
"hid {} deleted events for author {:?}",
update_count,
e.get_author_prefix()
);
} else {
// check if a deletion has already been recorded for this event.
// Only relevant for non-deletion events
let del_count = tx.query_row(
"SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.author=? AND t.name='e' AND e.kind=5 AND t.value_hex=? LIMIT 1;",
params![pubkey_blob, id_blob], |row| row.get::<usize, usize>(0));
// check if the query returned a result, meaning we should
// hide the current event
if del_count.ok().is_some() {
// a deletion already existed, mark original event as hidden.
info!(
"hid event: {:?} due to existing deletion by author: {:?}",
e.get_event_id_prefix(),
e.get_author_prefix()
);
let _update_count =
tx.execute("UPDATE event SET hidden=TRUE WHERE id=?", params![ev_id])?;
// event was deleted, so let caller know nothing new
// arrived, preventing this from being sent to active
// subscriptions
ins_count = 0;
}
}
tx.commit()?;
@@ -337,7 +462,7 @@ pub fn write_event(conn: &mut PooledConnection, e: &Event) -> Result<usize> {
}
/// Serialized event associated with a specific subscription request.
#[derive(PartialEq, Debug, Clone)]
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct QueryResult {
/// Subscription identifier
pub sub_id: String,
@@ -356,21 +481,26 @@ fn repeat_vars(count: usize) -> String {
s
}
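repeat_vars is unchanged and mostly elided by the hunk above; for context, it expands to the "?,?,?" placeholder runs used in parameterized IN (...) clauses (a sketch consistent with its call sites):

/// Build "?,?,..." with `count` placeholders for parameterized IN clauses.
fn repeat_vars(count: usize) -> String {
    if count == 0 {
        return "".to_owned();
    }
    let mut s = "?,".repeat(count);
    // remove the trailing comma
    s.pop();
    s
}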
/// Create a dynamic SQL query string and params from a subscription.
fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
/// Create a dynamic SQL subquery and params from a subscription filter.
fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>) {
// build a dynamic SQL query. all user-input is either an integer
// (sqli-safe), or a string that is filtered to only contain
// hexadecimal characters. Strings that require escaping (tag
// names/values) use parameters.
let mut query =
"SELECT DISTINCT(e.content) FROM event e LEFT JOIN tag t ON e.id=t.event_id ".to_owned();
// parameters
// if the filter is malformed, don't return anything.
if f.force_no_match {
let empty_query = "SELECT e.content, e.created_at FROM event e WHERE 1=0".to_owned();
// query parameters for SQLite
let empty_params: Vec<Box<dyn ToSql>> = vec![];
return (empty_query, empty_params);
}
let mut query = "SELECT e.content, e.created_at FROM event e".to_owned();
// query parameters for SQLite
let mut params: Vec<Box<dyn ToSql>> = vec![];
// for every filter in the subscription, generate a where clause
let mut filter_clauses: Vec<String> = Vec::new();
for f in sub.filters.iter() {
// individual filter components
// individual filter components (single conditions such as an author or event ID)
let mut filter_components: Vec<String> = Vec::new();
// Query for "authors", allowing prefix matches
if let Some(authvec) = &f.authors {
@@ -379,16 +509,22 @@ fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
for auth in authvec {
match hex_range(auth) {
Some(HexSearch::Exact(ex)) => {
auth_searches.push("author=?".to_owned());
auth_searches.push("author=? OR delegated_by=?".to_owned());
params.push(Box::new(ex.clone()));
params.push(Box::new(ex));
}
Some(HexSearch::Range(lower, upper)) => {
auth_searches.push("(author>? AND author<?)".to_owned());
auth_searches.push(
"(author>? AND author<?) OR (delegated_by>? AND delegated_by<?)".to_owned(),
);
params.push(Box::new(lower.clone()));
params.push(Box::new(upper.clone()));
params.push(Box::new(lower));
params.push(Box::new(upper));
}
Some(HexSearch::LowerOnly(lower)) => {
auth_searches.push("author>?".to_owned());
auth_searches.push("author>? OR delegated_by>?".to_owned());
params.push(Box::new(lower.clone()));
params.push(Box::new(lower));
}
None => {
@@ -396,8 +532,14 @@ fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
}
}
}
if !authvec.is_empty() {
let authors_clause = format!("({})", auth_searches.join(" OR "));
filter_components.push(authors_clause);
} else {
// if the authors list was empty, we should never return
// any results.
filter_components.push("false".to_owned());
}
}
// Query for Kind
if let Some(ks) = &f.kinds {
@@ -430,8 +572,14 @@ fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
}
}
}
if !idvec.is_empty() {
let id_clause = format!("({})", id_searches.join(" OR "));
filter_components.push(id_clause);
} else {
// if the ids list was empty, we should never return
// any results.
filter_components.push("false".to_owned());
}
}
// Query for tags
if let Some(map) = &f.tags {
@@ -439,8 +587,8 @@ fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
let mut str_vals: Vec<Box<dyn ToSql>> = vec![];
let mut blob_vals: Vec<Box<dyn ToSql>> = vec![];
for v in val {
if is_hex(v) {
if let Ok(h) = hex::decode(&v) {
if (v.len() % 2 == 0) && is_lower_hex(v) {
if let Ok(h) = hex::decode(v) {
blob_vals.push(Box::new(h));
}
} else {
@@ -450,9 +598,13 @@ fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
// create clauses with "?" params for each tag value being searched
let str_clause = format!("value IN ({})", repeat_vars(str_vals.len()));
let blob_clause = format!("value_hex IN ({})", repeat_vars(blob_vals.len()));
let tag_clause = format!("(name=? AND ({} OR {}))", str_clause, blob_clause);
// find evidence of the target tag name/value existing for this event.
let tag_clause = format!(
"e.id IN (SELECT e.id FROM event e LEFT JOIN tag t on e.id=t.event_id WHERE hidden!=TRUE and (name=? AND ({} OR {})))",
str_clause, blob_clause
);
// add the tag name as the first parameter
params.push(Box::new(key.to_owned()));
params.push(Box::new(key.to_string()));
// add all tag values that are plain strings as params
params.append(&mut str_vals);
// add all tag values that are blobs as params
@@ -470,31 +622,88 @@ fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
let until_clause = format!("created_at < {}", f.until.unwrap());
filter_components.push(until_clause);
}
// combine all clauses, and add to filter_clauses
if !filter_components.is_empty() {
let mut fc = "( ".to_owned();
fc.push_str(&filter_components.join(" AND "));
fc.push_str(" )");
filter_clauses.push(fc);
}
}
// never display hidden events
query.push_str(" WHERE hidden!=TRUE");
// combine all filters with OR clauses, if any exist
if !filter_clauses.is_empty() {
query.push_str(" AND (");
query.push_str(&filter_clauses.join(" OR "));
query.push_str(") ");
// build filter component conditions
if !filter_components.is_empty() {
query.push_str(" AND ");
query.push_str(&filter_components.join(" AND "));
}
// Apply per-filter limit to this subquery.
// The use of a LIMIT implies a DESC order, to capture only the most recent events.
if let Some(lim) = f.limit {
let _ = write!(query, " ORDER BY e.created_at DESC LIMIT {}", lim);
} else {
query.push_str(" ORDER BY e.created_at ASC")
}
// add order clause
query.push_str(" ORDER BY created_at ASC");
debug!("query string: {}", query);
(query, params)
}
/// Create a dynamic SQL query string and params from a subscription.
fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
// build a dynamic SQL query for an entire subscription, based on
// SQL subqueries for filters.
let mut subqueries: Vec<String> = Vec::new();
// subquery params
let mut params: Vec<Box<dyn ToSql>> = vec![];
// for every filter in the subscription, generate a subquery
for f in sub.filters.iter() {
let (f_subquery, mut f_params) = query_from_filter(f);
subqueries.push(f_subquery);
params.append(&mut f_params);
}
// encapsulate subqueries into select statements
let subqueries_selects: Vec<String> = subqueries
.iter()
.map(|s| format!("SELECT distinct content, created_at FROM ({})", s))
.collect();
let query: String = subqueries_selects.join(" UNION ");
(query, params)
}
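For illustration only, a two-filter REQ produces a UNION of per-filter subqueries along these lines (hypothetical and abbreviated; the exact clauses depend on the filter contents):

// ["REQ","s",{"kinds":[1],"limit":10},{"since":1671000000}] yields, roughly:
//
// SELECT distinct content, created_at FROM (
//   SELECT e.content, e.created_at FROM event e
//   WHERE hidden!=TRUE AND kind IN (1)
//   ORDER BY e.created_at DESC LIMIT 10)
// UNION
// SELECT distinct content, created_at FROM (
//   SELECT e.content, e.created_at FROM event e
//   WHERE hidden!=TRUE AND created_at > 1671000000
//   ORDER BY e.created_at ASC)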
/// Check if the pool is fully utilized
fn _pool_at_capacity(pool: &SqlitePool) -> bool {
let state: r2d2::State = pool.state();
state.idle_connections == 0
}
/// Log pool stats
fn log_pool_stats(name: &str, pool: &SqlitePool) {
let state: r2d2::State = pool.state();
let in_use_cxns = state.connections - state.idle_connections;
trace!(
"DB pool {:?} usage (in_use: {}, available: {})",
name,
in_use_cxns,
state.connections
);
if state.connections == in_use_cxns {
debug!("DB pool {:?} is empty (in_use: {})", name, in_use_cxns);
}
}
/// Perform database maintenance on a regular basis
pub async fn db_maintenance(pool: SqlitePool) {
tokio::task::spawn(async move {
loop {
tokio::select! {
_ = tokio::time::sleep(Duration::from_secs(EVENT_MAINTENANCE_FREQ_SEC)) => {
if let Ok(mut conn) = pool.get() {
// the busy timer will block writers, so don't set this any
// higher than the maximum latency you can tolerate for
// event writes.
conn.busy_timeout(Duration::from_secs(1)).ok();
debug!("running database optimizer");
optimize_db(&mut conn).ok();
debug!("running wal_checkpoint(TRUNCATE)");
checkpoint_db(&mut conn).ok();
}
}
};
}
});
}
/// Perform a database query using a subscription.
///
/// The [`Subscription`] is converted into a SQL query. Each result
@@ -503,39 +712,139 @@ fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
/// query is immediately aborted.
pub async fn db_query(
sub: Subscription,
conn: PooledConnection,
client_id: String,
pool: SqlitePool,
query_tx: tokio::sync::mpsc::Sender<QueryResult>,
mut abandon_query_rx: tokio::sync::oneshot::Receiver<()>,
) {
let pre_spawn_start = Instant::now();
task::spawn_blocking(move || {
debug!("going to query for: {:?}", sub);
let mut row_count: usize = 0;
let db_queue_time = pre_spawn_start.elapsed();
// if the queue time was very long (>5 seconds), spare the DB and abort.
if db_queue_time > Duration::from_secs(5) {
info!(
"shedding DB query load from {:?} (cid: {}, sub: {:?})",
db_queue_time, client_id, sub.id
);
return Ok(());
}
// otherwise, report queuing time if it is slow
else if db_queue_time > Duration::from_secs(1) {
debug!(
"(slow) DB query queued for {:?} (cid: {}, sub: {:?})",
db_queue_time, client_id, sub.id
);
}
let start = Instant::now();
let mut row_count: usize = 0;
// generate SQL query
let (q, p) = query_from_sub(&sub);
let sql_gen_elapsed = start.elapsed();
if sql_gen_elapsed > Duration::from_millis(10) {
debug!("SQL (slow) generated in {:?}", start.elapsed());
}
// show pool stats
log_pool_stats("reader", &pool);
// cutoff for displaying slow queries
let slow_cutoff = Duration::from_millis(2000);
// any client that doesn't cause us to generate new rows in 5
// seconds gets dropped.
let abort_cutoff = Duration::from_secs(5);
let start = Instant::now();
let mut slow_first_event;
let mut last_successful_send = Instant::now();
if let Ok(conn) = pool.get() {
// execute the query. Don't cache, since queries vary so much.
let mut stmt = conn.prepare(&q)?;
let mut event_rows = stmt.query(rusqlite::params_from_iter(p))?;
let mut first_result = true;
while let Some(row) = event_rows.next()? {
// check if this is still active (we could do this every N rows)
if abandon_query_rx.try_recv().is_ok() {
debug!("query aborted");
let first_event_elapsed = start.elapsed();
slow_first_event = first_event_elapsed >= slow_cutoff;
if first_result {
debug!(
"first result in {:?} (cid: {}, sub: {:?})",
first_event_elapsed, client_id, sub.id
);
first_result = false;
}
// logging for slow queries; show sub and SQL.
// to reduce logging, only show this for ~1/16th of clients (cid with a leading 0)
if row_count == 0 && slow_first_event && client_id.starts_with("0") {
debug!(
"query req (slow): {:?} (cid: {}, sub: {:?})",
sub, client_id, sub.id
);
debug!(
"query string (slow): {} (cid: {}, sub: {:?})",
q, client_id, sub.id
);
} else {
trace!(
"query req: {:?} (cid: {}, sub: {:?})",
sub,
client_id,
sub.id
);
trace!(
"query string: {} (cid: {}, sub: {:?})",
q,
client_id,
sub.id
);
}
// check if this is still active; every 100 rows
if row_count % 100 == 0 && abandon_query_rx.try_recv().is_ok() {
debug!("query aborted (cid: {}, sub: {:?})", client_id, sub.id);
return Ok(());
}
row_count += 1;
let event_json = row.get(0)?;
loop {
if query_tx.capacity() != 0 {
// we have capacity to add another item
break;
} else {
// the queue is full
trace!("db reader thread is stalled");
if last_successful_send + abort_cutoff < Instant::now() {
// the queue has been full for too long, abort
info!("aborting database query due to slow client");
let ok: Result<()> = Ok(());
return ok;
}
// give the queue a chance to clear before trying again
thread::sleep(Duration::from_millis(100));
}
}
// TODO: we could use try_send, but we'd have to juggle
// getting the query result back as part of the error
// result.
query_tx
.blocking_send(QueryResult {
sub_id: sub.get_id(),
event: event_json,
})
.ok();
last_successful_send = Instant::now();
}
query_tx
.blocking_send(QueryResult {
sub_id: sub.get_id(),
event: "EOSE".to_string(),
})
.ok();
debug!(
"query completed ({} rows) in {:?}",
row_count,
start.elapsed()
"query completed in {:?} (cid: {}, sub: {:?}, db_time: {:?}, rows: {})",
pre_spawn_start.elapsed(),
client_id,
sub.id,
start.elapsed(),
row_count
);
} else {
warn!("Could not get a database connection for querying");
}
let ok: Result<()> = Ok(());
ok
});
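Callers pair each query with a oneshot abort handle; firing (or dropping) it cancels the row scan at the next 100-row check. An illustrative pairing, using the types from this module:

async fn run_query(sub: Subscription, client_id: String, pool: SqlitePool) {
    let (abandon_tx, abandon_rx) = tokio::sync::oneshot::channel::<()>();
    let (results_tx, mut results_rx) = tokio::sync::mpsc::channel::<QueryResult>(256);
    db_query(sub, client_id, pool, results_tx, abandon_rx).await;
    // receive rows until the "EOSE" marker arrives...
    while let Some(qr) = results_rx.recv().await {
        if qr.event == "EOSE" {
            break;
        }
        // forward qr.event to the client under qr.sub_id
    }
    // ...or, if the client sends CLOSE first, abort the scan:
    abandon_tx.send(()).ok();
}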

src/delegation.rs (new file)

@@ -0,0 +1,416 @@
//! Event parsing and validation
use crate::error::Error;
use crate::error::Result;
use crate::event::Event;
use bitcoin_hashes::{sha256, Hash};
use lazy_static::lazy_static;
use regex::Regex;
use secp256k1::{schnorr, Secp256k1, VerifyOnly, XOnlyPublicKey};
use serde::{Deserialize, Serialize};
use std::str::FromStr;
use tracing::{debug, info};
// This handles everything related to delegation, in particular the
// condition/rune parsing and logic.
// Conditions are poorly specified, so we will implement the minimum
// necessary for now.
// fields MUST be either "kind" or "created_at".
// operators supported are ">", "<", "=", "!".
// no operations on 'content' are supported.
// this allows constraints for:
// valid date ranges (valid from X->Y dates).
// specific kinds (publish kind=1,5)
// kind ranges (publish ephemeral events, kind>19999&kind<30001)
// for more complex scenarios (allow delegatee to publish ephemeral
// AND replaceable events), it may be necessary to generate and use
// different condition strings, since we do not support grouping or
// "OR" logic.
lazy_static! {
/// Secp256k1 verification instance.
pub static ref SECP: Secp256k1<VerifyOnly> = Secp256k1::verification_only();
}
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub enum Field {
Kind,
CreatedAt,
}
impl FromStr for Field {
type Err = Error;
fn from_str(value: &str) -> Result<Self, Self::Err> {
if value == "kind" {
Ok(Field::Kind)
} else if value == "created_at" {
Ok(Field::CreatedAt)
} else {
Err(Error::DelegationParseError)
}
}
}
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub enum Operator {
LessThan,
GreaterThan,
Equals,
NotEquals,
}
impl FromStr for Operator {
type Err = Error;
fn from_str(value: &str) -> Result<Self, Self::Err> {
if value == "<" {
Ok(Operator::LessThan)
} else if value == ">" {
Ok(Operator::GreaterThan)
} else if value == "=" {
Ok(Operator::Equals)
} else if value == "!" {
Ok(Operator::NotEquals)
} else {
Err(Error::DelegationParseError)
}
}
}
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct ConditionQuery {
pub(crate) conditions: Vec<Condition>,
}
impl ConditionQuery {
pub fn allows_event(&self, event: &Event) -> bool {
// check each condition, to ensure that the event complies
// with the restriction.
for c in &self.conditions {
if !c.allows_event(event) {
// any failing conditions invalidates the delegation
// on this event
return false;
}
}
// delegation was permitted unconditionally, or all conditions
// were true
true
}
}
// Verify that the delegator approved the delegation; return a ConditionQuery if so.
pub fn validate_delegation(
delegator: &str,
delegatee: &str,
cond_query: &str,
sigstr: &str,
) -> Option<ConditionQuery> {
// form the token
let tok = format!("nostr:delegation:{}:{}", delegatee, cond_query);
// form SHA256 hash
let digest: sha256::Hash = sha256::Hash::hash(tok.as_bytes());
let sig = match schnorr::Signature::from_str(sigstr) {
    Ok(s) => s,
    Err(_) => {
        debug!("client sent a malformed delegation signature");
        return None;
    }
};
if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) {
if let Ok(pubkey) = XOnlyPublicKey::from_str(delegator) {
let verify = SECP.verify_schnorr(&sig, &msg, &pubkey);
if verify.is_ok() {
// return the parsed condition query
cond_query.parse::<ConditionQuery>().ok()
} else {
debug!("client sent an delegation signature that did not validate");
None
}
} else {
debug!("client sent malformed delegation pubkey");
None
}
} else {
info!("error converting delegation digest to secp256k1 message");
None
}
}
/// Parsed delegation condition
/// see https://github.com/nostr-protocol/nips/pull/28#pullrequestreview-1084903800
/// An example complex condition would be: kind=1,2,3&created_at<1665265999
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct Condition {
pub(crate) field: Field,
pub(crate) operator: Operator,
pub(crate) values: Vec<u64>,
}
impl Condition {
/// Check if this condition allows the given event to be delegated
pub fn allows_event(&self, event: &Event) -> bool {
// determine what the right-hand side of the operator is
let resolved_field = match &self.field {
Field::Kind => event.kind,
Field::CreatedAt => event.created_at,
};
match &self.operator {
Operator::LessThan => {
// the less-than operator is only valid for single values.
if self.values.len() == 1 {
if let Some(v) = self.values.first() {
return resolved_field < *v;
}
}
}
Operator::GreaterThan => {
// the greater-than operator is only valid for single values.
if self.values.len() == 1 {
if let Some(v) = self.values.first() {
return resolved_field > *v;
}
}
}
Operator::Equals => {
// equals is interpreted as "must be equal to at least one provided value"
return self.values.iter().any(|&x| resolved_field == x);
}
Operator::NotEquals => {
// not-equals is interpreted as "must not be equal to any provided value"
// this is the one case where an empty list of values could be allowed, even though it is a pointless restriction.
return self.values.iter().all(|&x| resolved_field != x);
}
}
false
}
}
fn str_to_condition(cs: &str) -> Option<Condition> {
// a condition is a string (alphanum+underscore), an operator (<>=!), and values (num+comma)
lazy_static! {
static ref RE: Regex = Regex::new("([[:word:]]+)([<>=!]+)([,[[:digit:]]]*)").unwrap();
}
// match against the regex
let caps = RE.captures(cs)?;
let field = caps.get(1)?.as_str().parse::<Field>().ok()?;
let operator = caps.get(2)?.as_str().parse::<Operator>().ok()?;
// values are just comma separated numbers, but all must be parsed
let rawvals = caps.get(3)?.as_str();
let values = rawvals
.split_terminator(',')
.map(|n| n.parse::<u64>().ok())
.collect::<Option<Vec<_>>>()?;
// assemble the parsed condition
Some(Condition {
field,
operator,
values,
})
}
/// Parse a condition query from a string slice
impl FromStr for ConditionQuery {
type Err = Error;
fn from_str(value: &str) -> Result<Self, Self::Err> {
// split the string with '&'
let mut conditions = vec![];
let condstrs = value.split_terminator('&');
// parse each individual condition
for c in condstrs {
conditions.push(str_to_condition(c).ok_or(Error::DelegationParseError)?);
}
Ok(ConditionQuery { conditions })
}
}
#[cfg(test)]
mod tests {
use super::*;
// parse condition strings
#[test]
fn parse_empty() -> Result<()> {
// given an empty condition query, produce an empty vector
let empty_cq = ConditionQuery { conditions: vec![] };
let parsed = "".parse::<ConditionQuery>()?;
assert_eq!(parsed, empty_cq);
Ok(())
}
// parse field 'kind'
#[test]
fn test_kind_field_parse() -> Result<()> {
let field = "kind".parse::<Field>()?;
assert_eq!(field, Field::Kind);
Ok(())
}
// parse field 'created_at'
#[test]
fn test_created_at_field_parse() -> Result<()> {
let field = "created_at".parse::<Field>()?;
assert_eq!(field, Field::CreatedAt);
Ok(())
}
// parse unknown field
#[test]
fn unknown_field_parse() {
let field = "unk".parse::<Field>();
assert!(field.is_err());
}
// parse a full conditional query with an empty array
#[test]
fn parse_kind_equals_empty() -> Result<()> {
// a kind condition with an empty value list still parses
let kind_cq = ConditionQuery {
conditions: vec![Condition {
field: Field::Kind,
operator: Operator::Equals,
values: vec![],
}],
};
let parsed = "kind=".parse::<ConditionQuery>()?;
assert_eq!(parsed, kind_cq);
Ok(())
}
// parse a full conditional query with a single value
#[test]
fn parse_kind_equals_singleval() -> Result<()> {
// parse a single-value kind condition
let kind_cq = ConditionQuery {
conditions: vec![Condition {
field: Field::Kind,
operator: Operator::Equals,
values: vec![1],
}],
};
let parsed = "kind=1".parse::<ConditionQuery>()?;
assert_eq!(parsed, kind_cq);
Ok(())
}
// parse a full conditional query with multiple values
#[test]
fn parse_kind_equals_multival() -> Result<()> {
// parse a multi-value kind condition
let kind_cq = ConditionQuery {
conditions: vec![Condition {
field: Field::Kind,
operator: Operator::Equals,
values: vec![1, 2, 4],
}],
};
let parsed = "kind=1,2,4".parse::<ConditionQuery>()?;
assert_eq!(parsed, kind_cq);
Ok(())
}
// parse multiple conditions
#[test]
fn parse_multi_conditions() -> Result<()> {
// parse several '&'-joined conditions into one query
let cq = ConditionQuery {
conditions: vec![
Condition {
field: Field::Kind,
operator: Operator::GreaterThan,
values: vec![10000],
},
Condition {
field: Field::Kind,
operator: Operator::LessThan,
values: vec![20000],
},
Condition {
field: Field::Kind,
operator: Operator::NotEquals,
values: vec![10001],
},
Condition {
field: Field::CreatedAt,
operator: Operator::LessThan,
values: vec![1665867123],
},
],
};
let parsed =
"kind>10000&kind<20000&kind!10001&created_at<1665867123".parse::<ConditionQuery>()?;
assert_eq!(parsed, cq);
Ok(())
}
fn simple_event() -> Event {
Event {
id: "0".to_owned(),
pubkey: "0".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: vec![],
content: "".to_owned(),
sig: "0".to_owned(),
tagidx: None,
}
}
// Check for condition logic on event w/ empty values
#[test]
fn condition_with_empty_values() {
let mut c = Condition {
field: Field::Kind,
operator: Operator::GreaterThan,
values: vec![],
};
let e = simple_event();
assert!(!c.allows_event(&e));
c.operator = Operator::LessThan;
assert!(!c.allows_event(&e));
c.operator = Operator::Equals;
assert!(!c.allows_event(&e));
// Not Equals applied to an empty list *is* allowed
// (pointless, but logically valid).
c.operator = Operator::NotEquals;
assert!(c.allows_event(&e));
}
// Check for condition logic on event w/ single value
#[test]
fn condition_kind_gt_event_single() {
let c = Condition {
field: Field::Kind,
operator: Operator::GreaterThan,
values: vec![10],
};
let mut e = simple_event();
// kind is not greater than 10, not allowed
e.kind = 1;
assert!(!c.allows_event(&e));
// kind is greater than 10, allowed
e.kind = 100;
assert!(c.allows_event(&e));
// kind is 10, not allowed
e.kind = 10;
assert!(!c.allows_event(&e));
}
// Check for condition logic on event w/ multi values
#[test]
fn condition_with_multi_values() {
let mut c = Condition {
field: Field::Kind,
operator: Operator::Equals,
values: vec![0, 10, 20],
};
let mut e = simple_event();
// Allow if event kind is in list for Equals
e.kind = 10;
assert!(c.allows_event(&e));
// Deny if event kind is not in list for Equals
e.kind = 11;
assert!(!c.allows_event(&e));
// Deny if event kind is in list for NotEquals
e.kind = 10;
c.operator = Operator::NotEquals;
assert!(!c.allows_event(&e));
// Allow if event kind is not in list for NotEquals
e.kind = 99;
c.operator = Operator::NotEquals;
assert!(c.allows_event(&e));
// Always deny if GreaterThan/LessThan for a list
c.operator = Operator::LessThan;
assert!(!c.allows_event(&e));
c.operator = Operator::GreaterThan;
assert!(!c.allows_event(&e));
}
}

src/error.rs

@@ -17,10 +17,16 @@ pub enum Error {
ConnWriteError,
#[error("EVENT parse failed")]
EventParseFailed,
#[error("ClOSE message parse failed")]
#[error("CLOSE message parse failed")]
CloseParseFailed,
#[error("Event validation failed")]
EventInvalid,
#[error("Event invalid signature")]
EventInvalidSignature,
#[error("Event invalid id")]
EventInvalidId,
#[error("Event malformed pubkey")]
EventMalformedPubkey,
#[error("Event could not canonicalize")]
EventCouldNotCanonicalize,
#[error("Event too large")]
EventMaxLengthError(usize),
#[error("Subscription identifier max length exceeded")]
@@ -48,6 +54,10 @@ pub enum Error {
JoinError,
#[error("Hyper Client error")]
HyperError(hyper::Error),
#[error("Hex encoding error")]
HexError(hex::FromHexError),
#[error("Delegation parse error")]
DelegationParseError,
#[error("Unknown/Undocumented")]
UnknownError,
}
@@ -58,6 +68,12 @@ pub enum Error {
// }
//}
impl From<hex::FromHexError> for Error {
fn from(h: hex::FromHexError) -> Self {
Error::HexError(h)
}
}
impl From<hyper::Error> for Error {
fn from(h: hyper::Error) -> Self {
Error::HyperError(h)

src/event.rs

@@ -1,12 +1,11 @@
//! Event parsing and validation
use crate::config;
use crate::delegation::validate_delegation;
use crate::error::Error::*;
use crate::error::Result;
use crate::nip05;
use crate::utils::unix_time;
use bitcoin_hashes::{sha256, Hash};
use lazy_static::lazy_static;
use log::*;
use secp256k1::{schnorr, Secp256k1, VerifyOnly, XOnlyPublicKey};
use serde::{Deserialize, Deserializer, Serialize};
use serde_json::value::Value;
@@ -14,6 +13,7 @@ use serde_json::Number;
use std::collections::HashMap;
use std::collections::HashSet;
use std::str::FromStr;
use tracing::{debug, info};
lazy_static! {
/// Secp256k1 verification instance.
@@ -21,17 +21,25 @@ lazy_static! {
}
/// Event command in network format.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct EventCmd {
cmd: String, // expecting static "EVENT"
event: Event,
}
impl EventCmd {
pub fn event_id(&self) -> &str {
&self.event.id
}
}
/// Parsed nostr event.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct Event {
pub id: String,
pub(crate) pubkey: String,
#[serde(skip)]
pub(crate) delegated_by: Option<String>,
pub(crate) created_at: u64,
pub(crate) kind: u64,
#[serde(deserialize_with = "tag_from_string")]
@@ -39,9 +47,9 @@ pub struct Event {
pub(crate) tags: Vec<Vec<String>>,
pub(crate) content: String,
pub(crate) sig: String,
// Optimization for tag search, built on demand
// Optimization for tag search, built on demand.
#[serde(skip)]
pub(crate) tagidx: Option<HashMap<String, HashSet<String>>>,
pub(crate) tagidx: Option<HashMap<char, HashSet<String>>>,
}
/// Simple tag type for array of array of strings.
@@ -53,7 +61,26 @@ where
D: Deserializer<'de>,
{
let opt = Option::deserialize(deserializer)?;
Ok(opt.unwrap_or_else(Vec::new))
Ok(opt.unwrap_or_default())
}
/// Attempt to form a single-char tag name.
pub fn single_char_tagname(tagname: &str) -> Option<char> {
// We return the tag character if and only if the tagname consists
// of a single char.
let mut tagnamechars = tagname.chars();
let firstchar = tagnamechars.next();
match firstchar {
Some(_) => {
// check second char
if tagnamechars.next().is_none() {
firstchar
} else {
None
}
}
None => None,
}
}
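The upshot is that only single-character tag names ("e", "p", and so on) participate in the tag index and generic tag queries:

fn demo_tagname_filter() {
    assert_eq!(single_char_tagname("e"), Some('e'));
    assert_eq!(single_char_tagname("p"), Some('p'));
    // multi-character names (e.g. "delegation") are stored but not indexed
    assert_eq!(single_char_tagname("delegation"), None);
    assert_eq!(single_char_tagname(""), None);
}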
/// Convert network event to parsed/validated event.
@@ -62,12 +89,13 @@ impl From<EventCmd> for Result<Event> {
// ensure command is correct
if ec.cmd != "EVENT" {
Err(CommandUnknownError)
} else if ec.event.is_valid() {
} else {
ec.event.validate().map(|_| {
let mut e = ec.event;
e.build_index();
Ok(e)
} else {
Err(EventInvalid)
e.update_delegation();
e
})
}
}
}
@@ -92,6 +120,50 @@ impl Event {
None
}
// is this event delegated (properly)?
// does the signature match, and are conditions valid?
// if so, return an alternate author for the event
pub fn delegated_author(&self) -> Option<String> {
// is there a delegation tag?
let delegation_tag: Vec<String> = self
.tags
.iter()
.filter(|x| x.len() == 4)
.filter(|x| x.get(0).unwrap() == "delegation")
.take(1)
.next()?
.to_vec(); // get first tag
//let delegation_tag = self.tag_values_by_name("delegation");
// delegation tags should have exactly 3 elements after the name (pubkey, condition, sig)
// the event is signed by the delegatee
let delegatee = &self.pubkey;
// the delegation tag references the claimed delegator
let delegator: &str = delegation_tag.get(1)?;
let querystr: &str = delegation_tag.get(2)?;
let sig: &str = delegation_tag.get(3)?;
// attempt to get a condition query; this requires the delegation to have a valid signature.
if let Some(cond_query) = validate_delegation(delegator, delegatee, querystr, sig) {
// The signature was valid, now we ensure the delegation
// condition is valid for this event:
if cond_query.allows_event(self) {
// conditions are satisfied; return the delegator as the effective author
Some(delegator.into())
} else {
debug!("an event failed to satisfy delegation conditions");
None
}
} else {
debug!("event had had invalid delegation signature");
None
}
}
/// Update delegation status
fn update_delegation(&mut self) {
self.delegated_by = self.delegated_author();
}
/// Build an event tag index
fn build_index(&mut self) {
// if there are no tags, just leave the index as None
@@ -99,18 +171,21 @@ impl Event {
return;
}
// otherwise, build an index
let mut idx: HashMap<String, HashSet<String>> = HashMap::new();
let mut idx: HashMap<char, HashSet<String>> = HashMap::new();
// iterate over tags that have at least 2 elements
for t in self.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.get(0).unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
}
let tagnamechar = tagnamechar_opt.unwrap();
let tagval = t.get(1).unwrap();
// ensure a vector exists for this tag
if !idx.contains_key(tagname) {
idx.insert(tagname.clone(), HashSet::new());
}
idx.entry(tagnamechar).or_insert_with(HashSet::new);
// get the tag vec and insert entry
let tidx = idx.get_mut(tagname).expect("could not get tag vector");
tidx.insert(tagval.clone());
let idx_tag_vec = idx.get_mut(&tagnamechar).expect("could not get tag vector");
idx_tag_vec.insert(tagval.clone());
}
// save the tag structure
self.tagidx = Some(idx);
@@ -124,24 +199,35 @@ impl Event {
self.pubkey.chars().take(8).collect()
}
/// Check if this event has a valid signature.
fn is_valid(&self) -> bool {
// TODO: return a Result with a reason for invalid events
// don't bother to validate an event with a timestamp in the distant future.
let config = config::SETTINGS.read().unwrap();
let max_future_sec = config.options.reject_future_seconds;
if let Some(allowable_future) = max_future_sec {
/// Retrieve the first value of every tag matching the given name
pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
self.tags
.iter()
.filter(|x| x.len() > 1)
.filter(|x| x.get(0).unwrap() == tag_name)
.map(|x| x.get(1).unwrap().to_owned())
.collect()
}
pub fn is_valid_timestamp(&self, reject_future_seconds: Option<usize>) -> bool {
if let Some(allowable_future) = reject_future_seconds {
let curr_time = unix_time();
// calculate difference, plus how far future we allow
if curr_time + (allowable_future as u64) < self.created_at {
let delta = self.created_at - curr_time;
debug!(
"Event is too far in the future ({} seconds), rejecting",
"event is too far in the future ({} seconds), rejecting",
delta
);
return false;
}
}
true
}
/// Validate this event: canonical form, id digest, and signature.
fn validate(&self) -> Result<()> {
// validation is performed by:
// * parsing JSON string into event fields
// * create an array:
@@ -149,8 +235,8 @@ impl Event {
// * serialize with no spaces/newlines
let c_opt = self.to_canonical();
if c_opt.is_none() {
debug!("event could not be canonicalized");
return false;
debug!("could not canonicalize");
return Err(EventCouldNotCanonicalize);
}
let c = c_opt.unwrap();
// * compute the sha256sum.
@@ -159,22 +245,21 @@ impl Event {
// * ensure the id matches the computed sha256sum.
if self.id != hex_digest {
debug!("event id does not match digest");
return false;
return Err(EventInvalidId);
}
// * validate the message digest (sig) using the pubkey & computed sha256 message hash.
let sig = match schnorr::Signature::from_str(&self.sig) {
    Ok(s) => s,
    Err(_) => {
        debug!("client sent a malformed signature");
        return Err(EventInvalidSignature);
    }
};
if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) {
if let Ok(pubkey) = XOnlyPublicKey::from_str(&self.pubkey) {
let verify = SECP.verify_schnorr(&sig, &msg, &pubkey);
matches!(verify, Ok(()))
SECP.verify_schnorr(&sig, &msg, &pubkey)
.map_err(|_| EventInvalidSignature)
} else {
debug!("Client sent malformed pubkey");
false
debug!("client sent malformed pubkey");
Err(EventMalformedPubkey)
}
} else {
info!("Error converting digest to secp256k1 message");
false
info!("error converting digest to secp256k1 message");
Err(EventInvalidSignature)
}
}
@@ -216,9 +301,10 @@ impl Event {
}
/// Determine if the given tag and value set intersect with tags in this event.
pub fn generic_tag_val_intersect(&self, tagname: &str, check: &HashSet<String>) -> bool {
pub fn generic_tag_val_intersect(&self, tagname: char, check: &HashSet<String>) -> bool {
match &self.tagidx {
Some(idx) => match idx.get(tagname) {
// check if this is indexable tagname
Some(idx) => match idx.get(&tagname) {
Some(valset) => {
let common = valset.intersection(check);
common.count() > 0
@@ -237,6 +323,7 @@ mod tests {
Event {
id: "0".to_owned(),
pubkey: "0".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: vec![],
@@ -263,26 +350,24 @@ mod tests {
}
#[test]
fn empty_event_tag_match() -> Result<()> {
fn empty_event_tag_match() {
let event = simple_event();
assert!(!event
.generic_tag_val_intersect("e", &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
Ok(())
.generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
}
#[test]
fn single_event_tag_match() -> Result<()> {
fn single_event_tag_match() {
let mut event = simple_event();
event.tags = vec![vec!["e".to_owned(), "foo".to_owned()]];
event.build_index();
assert_eq!(
event.generic_tag_val_intersect(
"e",
'e',
&HashSet::from(["foo".to_owned(), "bar".to_owned()])
),
true
);
Ok(())
}
#[test]
@@ -320,6 +405,7 @@ mod tests {
let e = Event {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501234,
kind: 1,
tags: vec![],
@@ -332,11 +418,66 @@ mod tests {
assert_eq!(c, expected);
}
#[test]
fn event_tag_select() {
let e = Event {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501234,
kind: 1,
tags: vec![
vec!["j".to_owned(), "abc".to_owned()],
vec!["e".to_owned(), "foo".to_owned()],
vec!["e".to_owned(), "bar".to_owned()],
vec!["e".to_owned(), "baz".to_owned()],
vec![
"p".to_owned(),
"aaaa".to_owned(),
"ws://example.com".to_owned(),
],
],
content: "this is a test".to_owned(),
sig: "abcde".to_owned(),
tagidx: None,
};
let v = e.tag_values_by_name("e");
assert_eq!(v, vec!["foo", "bar", "baz"]);
}
#[test]
fn event_no_tag_select() {
let e = Event {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501234,
kind: 1,
tags: vec![
vec!["j".to_owned(), "abc".to_owned()],
vec!["e".to_owned(), "foo".to_owned()],
vec!["e".to_owned(), "baz".to_owned()],
vec![
"p".to_owned(),
"aaaa".to_owned(),
"ws://example.com".to_owned(),
],
],
content: "this is a test".to_owned(),
sig: "abcde".to_owned(),
tagidx: None,
};
let v = e.tag_values_by_name("x");
// asking for tags that don't exist just returns a zero-length vector
assert_eq!(v.len(), 0);
}
#[test]
fn event_canonical_with_tags() {
let e = Event {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501234,
kind: 1,
tags: vec![

src/hexrange.rs

@@ -3,7 +3,7 @@ use crate::utils::is_hex;
use hex;
/// Types of hexadecimal queries.
#[derive(PartialEq, Debug, Clone)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)]
pub enum HexSearch {
// when no range is needed, exact 32-byte
Exact(Vec<u8>),
@@ -60,11 +60,10 @@ pub fn hex_range(s: &str) -> Option<HexSearch> {
upper[byte_len] = b + 16; // bump up the first character in this byte
// increment done, stop iterating through the vec
break;
} else {
}
// if it is 'f', reset the byte to 0 and do a carry
// reset and carry
upper[byte_len] = 0;
}
// done with odd logic, so don't repeat this
odd = false;
} else {
@@ -80,6 +79,7 @@ pub fn hex_range(s: &str) -> Option<HexSearch> {
#[cfg(test)]
mod tests {
use super::*;
use crate::error::Result;
#[test]
fn hex_range_exact() -> Result<()> {

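For orientation, hex_range maps a hex prefix onto a lexicographic byte range (hypothetical illustrations, consistent with the enum above):

// A full 64-char id needs no range:
//   hex_range(<64 hex chars>) => Some(Exact(<32 bytes>))
// An even-length prefix bumps the final byte for the exclusive upper bound:
//   hex_range("ab")   => Some(Range([0xab], [0xac]))
// An all-f prefix has no upper bound:
//   hex_range("ffff") => Some(LowerOnly([0xff, 0xff]))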
src/info.rs

@@ -35,7 +35,7 @@ impl From<config::Info> for RelayInfo {
description: i.description,
pubkey: i.pubkey,
contact: i.contact,
supported_nips: Some(vec![1, 2, 11]),
supported_nips: Some(vec![1, 2, 9, 11, 12, 15, 16, 20, 22, 26]),
software: Some("https://git.sr.ht/~gheartsfield/nostr-rs-relay".to_owned()),
version: CARGO_PKG_VERSION.map(|x| x.to_owned()),
}

src/lib.rs

@@ -2,11 +2,15 @@ pub mod close;
pub mod config;
pub mod conn;
pub mod db;
pub mod delegation;
pub mod error;
pub mod event;
pub mod hexrange;
pub mod info;
pub mod nip05;
pub mod notice;
pub mod schema;
pub mod subscription;
pub mod utils;
// Public API for creating relays programmatically
pub mod server;

src/main.rs

@@ -1,569 +1,51 @@
//! Server process
use futures::SinkExt;
use futures::StreamExt;
use hyper::header::ACCEPT;
use hyper::service::{make_service_fn, service_fn};
use hyper::upgrade::Upgraded;
use hyper::{
header, server::conn::AddrStream, upgrade, Body, Request, Response, Server, StatusCode,
};
use log::*;
use nostr_rs_relay::close::Close;
use nostr_rs_relay::close::CloseCmd;
use nostr_rs_relay::config;
use nostr_rs_relay::conn;
use nostr_rs_relay::db;
use nostr_rs_relay::db::SubmittedEvent;
use nostr_rs_relay::error::{Error, Result};
use nostr_rs_relay::event::Event;
use nostr_rs_relay::event::EventCmd;
use nostr_rs_relay::info::RelayInfo;
use nostr_rs_relay::nip05;
use nostr_rs_relay::subscription::Subscription;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::convert::Infallible;
use nostr_rs_relay::server::start_server;
use std::env;
use std::net::SocketAddr;
use std::path::Path;
use std::time::Duration;
use std::time::Instant;
use tokio::runtime::Builder;
use tokio::sync::broadcast::{self, Receiver, Sender};
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio_tungstenite::WebSocketStream;
use tungstenite::error::Error as WsError;
use tungstenite::handshake;
use tungstenite::protocol::Message;
use tungstenite::protocol::WebSocketConfig;
use std::sync::mpsc as syncmpsc;
use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
use std::thread;
use tracing::info;
use console_subscriber::ConsoleLayer;
/// Return a requested database directory from the command line arguments.
fn db_from_args(args: Vec<String>) -> Option<String> {
fn db_from_args(args: &[String]) -> Option<String> {
if args.len() == 3 && args.get(1) == Some(&"--db".to_owned()) {
return args.get(2).map(|x| x.to_owned());
return args.get(2).map(std::clone::Clone::clone);
}
None
}
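For example, with the borrowed-slice signature:

let args: Vec<String> = vec!["nostr-rs-relay".into(), "--db".into(), "/tmp/relay".into()];
assert_eq!(db_from_args(&args), Some("/tmp/relay".to_owned()));
assert_eq!(db_from_args(&args[..2]), None); // no --db value given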
/// Handle arbitrary HTTP requests, including for WebSocket upgrades.
async fn handle_web_request(
mut request: Request<Body>,
pool: db::SqlitePool,
remote_addr: SocketAddr,
broadcast: Sender<Event>,
event_tx: tokio::sync::mpsc::Sender<SubmittedEvent>,
shutdown: Receiver<()>,
) -> Result<Response<Body>, Infallible> {
match (
request.uri().path(),
request.headers().contains_key(header::UPGRADE),
) {
// Request for / as websocket
("/", true) => {
debug!("websocket with upgrade request");
//assume request is a handshake, so create the handshake response
let response = match handshake::server::create_response_with_body(&request, || {
Body::empty()
}) {
Ok(response) => {
//in case the handshake response creation succeeds,
//spawn a task to handle the websocket connection
tokio::spawn(async move {
//using the hyper feature of upgrading a connection
match upgrade::on(&mut request).await {
//if successfully upgraded
Ok(upgraded) => {
// set WebSocket configuration options
let mut config = WebSocketConfig::default();
{
let settings = config::SETTINGS.read().unwrap();
config.max_message_size = settings.limits.max_ws_message_bytes;
config.max_frame_size = settings.limits.max_ws_frame_bytes;
}
//create a websocket stream from the upgraded object
let ws_stream = WebSocketStream::from_raw_socket(
//pass the upgraded object
//as the base layer stream of the Websocket
upgraded,
tokio_tungstenite::tungstenite::protocol::Role::Server,
Some(config),
)
.await;
tokio::spawn(nostr_server(
pool, ws_stream, broadcast, event_tx, shutdown,
));
}
Err(e) => println!(
"error when trying to upgrade connection \
from address {} to websocket connection. \
Error is: {}",
remote_addr, e
),
}
});
//return the response to the handshake request
response
}
Err(error) => {
warn!("websocket response failed");
let mut res =
Response::new(Body::from(format!("Failed to create websocket: {}", error)));
*res.status_mut() = StatusCode::BAD_REQUEST;
return Ok(res);
}
};
Ok::<_, Infallible>(response)
}
// Request for Relay info
("/", false) => {
// handle request at root with no upgrade header
// Check if this is a nostr server info request
let accept_header = &request.headers().get(ACCEPT);
// check if application/nostr+json is included
if let Some(media_types) = accept_header {
if let Ok(mt_str) = media_types.to_str() {
if mt_str.contains("application/nostr+json") {
let config = config::SETTINGS.read().unwrap();
// build a relay info response
debug!("Responding to server info request");
let rinfo = RelayInfo::from(config.info.clone());
let b = Body::from(serde_json::to_string_pretty(&rinfo).unwrap());
return Ok(Response::builder()
.status(200)
.header("Content-Type", "application/nostr+json")
.body(b)
.unwrap());
}
}
}
Ok(Response::new(Body::from(
"Please use a Nostr client to connect.",
)))
}
(_, _) => {
//handle any other url
Ok(Response::builder()
.status(StatusCode::NOT_FOUND)
.body(Body::from("Nothing here."))
.unwrap())
}
}
}
async fn shutdown_signal() {
// Wait for the CTRL+C signal
tokio::signal::ctrl_c()
.await
.expect("failed to install CTRL+C signal handler");
}
/// Start running a Nostr relay server.
fn main() -> Result<(), Error> {
// setup logger
let _ = env_logger::try_init();
fn main() {
// setup tracing
let _trace_sub = tracing_subscriber::fmt::try_init();
info!("Starting up from main");
// get database directory from args
let args: Vec<String> = env::args().collect();
let db_dir: Option<String> = db_from_args(args);
{
let mut settings = config::SETTINGS.write().unwrap();
let db_dir: Option<String> = db_from_args(&args);
// configure settings from config.toml
// replace default settings with those read from config.toml
let mut c = config::Settings::new();
let mut settings = config::Settings::new();
if settings.diagnostics.tracing {
// enable tracing with tokio-console
ConsoleLayer::builder().with_default_env().init();
}
// update with database location
if let Some(db) = db_dir {
c.database.data_directory = db;
}
*settings = c;
settings.database.data_directory = db;
}
let settings = config::SETTINGS.read().unwrap();
trace!("Config: {:?}", settings);
// do some config validation.
if !Path::new(&settings.database.data_directory).is_dir() {
error!("Database directory does not exist");
return Err(Error::DatabaseDirError);
}
let addr = format!(
"{}:{}",
settings.network.address.trim(),
settings.network.port
);
let socket_addr = addr.parse().expect("listening address not valid");
// address whitelisting settings
if let Some(addr_whitelist) = &settings.authorization.pubkey_whitelist {
info!(
"Event publishing restricted to {} pubkey(s)",
addr_whitelist.len()
);
}
// check if NIP-05 enforced user verification is on
if settings.verified_users.is_active() {
info!(
"NIP-05 user verification mode:{:?}",
settings.verified_users.mode
);
if let Some(d) = settings.verified_users.verify_update_duration() {
info!("NIP-05 check user verification every: {:?}", d);
}
if let Some(d) = settings.verified_users.verify_expiration_duration() {
info!("NIP-05 user verification expires after: {:?}", d);
}
if let Some(wl) = &settings.verified_users.domain_whitelist {
info!("NIP-05 domain whitelist: {:?}", wl);
}
if let Some(bl) = &settings.verified_users.domain_blacklist {
info!("NIP-05 domain blacklist: {:?}", bl);
}
}
// configure tokio runtime
let rt = Builder::new_multi_thread()
.enable_all()
.thread_name("tokio-ws")
.build()
.unwrap();
// start tokio
rt.block_on(async {
let settings = config::SETTINGS.read().unwrap();
info!("listening on: {}", socket_addr);
// all client-submitted valid events are broadcast to every
// other client on this channel. This should be large enough
// to accommodate slower readers (messages are dropped if
// clients cannot keep up).
let (bcast_tx, _) = broadcast::channel::<Event>(settings.limits.broadcast_buffer);
// validated events that need to be persisted are sent to the
// database via this channel.
let (event_tx, event_rx) =
mpsc::channel::<SubmittedEvent>(settings.limits.event_persist_buffer);
// establish a channel for letting all threads know about a
// requested server shutdown.
let (invoke_shutdown, shutdown_listen) = broadcast::channel::<()>(1);
// create a channel for sending any new metadata event. These
// will get processed relatively slowly (a potentially
// multi-second blocking HTTP call) on a single thread, so we
// buffer requests on the channel. No harm in dropping events
// here, since we are protecting against DoS. This can make
// it difficult to set up initial metadata in bulk, since
// overwhelming this will drop events and won't register
// metadata events.
let (metadata_tx, metadata_rx) = broadcast::channel::<Event>(4096);
// start the database writer thread. Give it a channel for
// writing events, and for publishing events that have been
// written (to all connected clients).
db::db_writer(
event_rx,
bcast_tx.clone(),
metadata_tx.clone(),
shutdown_listen,
)
.await;
info!("db writer created");
// create a nip-05 verifier thread
let verifier_opt = nip05::Verifier::new(metadata_rx, bcast_tx.clone());
if let Ok(mut v) = verifier_opt {
if settings.verified_users.is_active() {
tokio::task::spawn(async move {
info!("starting up NIP-05 verifier...");
v.run().await;
let (_, ctrl_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
// run this in a new thread
let handle = thread::spawn(|| {
// we should have a 'control plane' channel to monitor and bump the server.
// this will let us do stuff like clear the database, shutdown, etc.
let _svr = start_server(settings, ctrl_rx);
});
}
}
// listen for ctrl-c interrupts
let ctrl_c_shutdown = invoke_shutdown.clone();
tokio::spawn(async move {
tokio::signal::ctrl_c().await.unwrap();
info!("shutting down due to SIGINT");
ctrl_c_shutdown.send(()).ok();
});
// build a connection pool for sqlite connections
let pool = db::build_pool(
"client query",
rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY
| rusqlite::OpenFlags::SQLITE_OPEN_SHARED_CACHE,
settings.database.min_conn,
settings.database.max_conn,
true,
);
// A `Service` is needed for every connection, so this
// creates one from our `handle_request` function.
let make_svc = make_service_fn(|conn: &AddrStream| {
let svc_pool = pool.clone();
let remote_addr = conn.remote_addr();
let bcast = bcast_tx.clone();
let event = event_tx.clone();
let stop = invoke_shutdown.clone();
async move {
// service_fn converts our function into a `Service`
Ok::<_, Infallible>(service_fn(move |request: Request<Body>| {
handle_web_request(
request,
svc_pool.clone(),
remote_addr,
bcast.clone(),
event.clone(),
stop.subscribe(),
)
}))
}
});
let server = Server::bind(&socket_addr)
.serve(make_svc)
.with_graceful_shutdown(shutdown_signal());
// run hyper
if let Err(e) = server.await {
eprintln!("server error: {}", e);
}
// our code
});
Ok(())
}
/// Nostr protocol messages from a client
#[derive(Deserialize, Serialize, Clone, PartialEq, Debug)]
#[serde(untagged)]
pub enum NostrMessage {
/// An `EVENT` message
EventMsg(EventCmd),
/// A `REQ` message
SubMsg(Subscription),
/// A `CLOSE` message
CloseMsg(CloseCmd),
}
/// Convert Message to NostrMessage
fn convert_to_msg(msg: String) -> Result<NostrMessage> {
let config = config::SETTINGS.read().unwrap();
let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
match parsed_res {
Ok(m) => {
if let NostrMessage::EventMsg(_) = m {
if let Some(max_size) = config.limits.max_event_bytes {
// check length, ensure that some max size is set.
if msg.len() > max_size && max_size > 0 {
return Err(Error::EventMaxLengthError(msg.len()));
}
}
}
Ok(m)
}
Err(e) => {
debug!("proto parse error: {:?}", e);
debug!("parse error on message: {}", msg.trim());
Err(Error::ProtoParseError)
}
}
}
/// Handle new client connections. This runs through an event loop
/// for all client communication.
async fn nostr_server(
pool: db::SqlitePool,
mut ws_stream: WebSocketStream<Upgraded>,
broadcast: Sender<Event>,
event_tx: mpsc::Sender<SubmittedEvent>,
mut shutdown: Receiver<()>,
) {
// get a broadcast channel for clients to communicate on
let mut bcast_rx = broadcast.subscribe();
// Track internal client state
let mut conn = conn::ClientConn::new();
let cid = conn.get_client_prefix();
// Create a channel for receiving query results from the database.
// we will send out the tx handle to any query we generate.
let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(256);
// Create channel for receiving NOTICEs
let (notice_tx, mut notice_rx) = mpsc::channel::<String>(32);
// maintain a hashmap of oneshot channels for active subscriptions.
// when a subscription is cancelled, send a message so the
// executing query knows to stop.
// last time this client sent data
let mut last_message_time = Instant::now();
// ping interval (every 5 minutes)
let default_ping_dur = Duration::from_secs(300);
// disconnect after 20 minutes without a ping response or event.
let max_quiet_time = Duration::from_secs(60 * 20);
let start = tokio::time::Instant::now() + default_ping_dur;
let mut ping_interval = tokio::time::interval_at(start, default_ping_dur);
let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
// for stats, keep track of how many events the client published,
// and how many it received from queries.
let mut client_published_event_count: usize = 0;
let mut client_received_event_count: usize = 0;
info!("new connection for client: {:?}", cid);
loop {
tokio::select! {
_ = shutdown.recv() => {
// server shutting down, exit loop
break;
},
_ = ping_interval.tick() => {
// check how long since we talked to client
// if it has been too long, disconnect
if last_message_time.elapsed() > max_quiet_time {
debug!("ending connection due to lack of client ping response");
break;
}
// Send a ping
ws_stream.send(Message::Ping(Vec::new())).await.ok();
},
Some(notice_msg) = notice_rx.recv() => {
let n = notice_msg.to_string().replace("\"", "");
ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", n))).await.ok();
},
Some(query_result) = query_rx.recv() => {
// database informed us of a query result we asked for
client_received_event_count += 1;
// send a result
let subesc = query_result.sub_id.replace("\"", "");
let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
ws_stream.send(Message::Text(send_str)).await.ok();
},
// TODO: consider logging the LaggedRecv error
Ok(global_event) = bcast_rx.recv() => {
// an event has been broadcast to all clients
// first check if there is a subscription for this event.
let matching_subs = conn.get_matching_subscriptions(&global_event);
for s in matching_subs {
// TODO: serialize at broadcast time, instead of
// once for each consumer.
if let Ok(event_str) = serde_json::to_string(&global_event) {
debug!("sub match: client: {:?}, sub: {:?}, event: {:?}",
cid, s,
global_event.get_event_id_prefix());
// create an event response and send it
let subesc = s.replace("\"", "");
ws_stream.send(Message::Text(format!("[\"EVENT\",\"{}\",{}]", subesc, event_str))).await.ok();
//nostr_stream.send(res).await.ok();
} else {
warn!("could not serialize event {:?}", global_event.get_event_id_prefix());
}
}
},
ws_next = ws_stream.next() => {
// update most recent message time for client
last_message_time = Instant::now();
// Consume text messages from the client, parse into Nostr messages.
let nostr_msg = match ws_next {
Some(Ok(Message::Text(m))) => {
convert_to_msg(m)
},
Some(Ok(Message::Binary(_))) => {
ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "binary messages are not accepted"))).await.ok();
continue;
},
Some(Ok(Message::Ping(_))) | Some(Ok(Message::Pong(_))) => {
// get a ping/pong, ignore
continue;
},
None | Some(Ok(Message::Close(_))) | Some(Err(WsError::AlreadyClosed)) | Some(Err(WsError::ConnectionClosed)) => {
debug!("normal websocket close from client: {:?}",cid);
break;
},
x => {
info!("message was: {:?} (ignoring)", x);
continue;
}
};
// convert ws_next into proto_next
match nostr_msg {
Ok(NostrMessage::EventMsg(ec)) => {
// An EventCmd needs to be validated to be converted into an Event
// handle each type of message
let parsed : Result<Event> = Result::<Event>::from(ec);
match parsed {
Ok(e) => {
let id_prefix:String = e.id.chars().take(8).collect();
debug!("successfully parsed/validated event: {:?} from client: {:?}", id_prefix, cid);
// Write this to the database.
let submit_event = SubmittedEvent { event: e.clone(), notice_tx: notice_tx.clone() };
event_tx.send(submit_event).await.ok();
client_published_event_count += 1;
},
Err(_) => {
info!("client {:?} sent an invalid event", cid);
ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "event was invalid"))).await.ok();
}
}
},
Ok(NostrMessage::SubMsg(s)) => {
debug!("client {} requesting a subscription", cid);
// subscription handling consists of:
// * registering the subscription so future events can be matched
// * making a channel to cancel the request later
// * sending a request for a SQL query
let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
match conn.subscribe(s.clone()) {
Ok(()) => {
// when we insert, if there was a previous query running with the same name, cancel it.
if let Some(previous_query) = running_queries.insert(s.id.to_owned(), abandon_query_tx) {
previous_query.send(()).ok();
}
// start a database query
// show pool stats
debug!("DB pool stats: {:?}", pool.state());
db::db_query(s, pool.get().expect("could not get connection"), query_tx.clone(), abandon_query_rx).await;
},
Err(e) => {
info!("Subscription error: {}", e);
let s = e.to_string().replace("\"", "");
ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", s))).await.ok();
}
}
},
Ok(NostrMessage::CloseMsg(cc)) => {
// closing a request simply removes the subscription.
let parsed : Result<Close> = Result::<Close>::from(cc);
match parsed {
Ok(c) => {
// check if a query is currently
// running, and remove it if so.
let stop_tx = running_queries.remove(&c.id);
if let Some(tx) = stop_tx {
tx.send(()).ok();
}
// stop checking new events against
// the subscription
conn.unsubscribe(c);
},
Err(_) => {
info!("invalid command ignored");
ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "could not parse command"))).await.ok();
}
}
},
Err(Error::ConnError) => {
debug!("got connection close/error, disconnecting client: {:?}",cid);
break;
}
Err(Error::EventMaxLengthError(s)) => {
info!("client {:?} sent event larger ({} bytes) than max size", cid, s);
ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "event exceeded max size"))).await.ok();
},
Err(Error::ProtoParseError) => {
info!("client {:?} sent event that could not be parsed", cid);
ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "could not parse command"))).await.ok();
},
Err(e) => {
info!("got non-fatal error from client: {:?}, error: {:?}", cid, e);
},
}
},
}
}
// connection cleanup - ensure any still running queries are terminated.
for (_, stop_tx) in running_queries.into_iter() {
stop_tx.send(()).ok();
}
info!(
"stopping connection for client: {:?} (client sent {} event(s), received {})",
cid, client_published_event_count, client_received_event_count
);
// block until the nostr server thread finishes.
handle.join().unwrap();
}

View File

@@ -4,7 +4,7 @@
//! address with their public key, in metadata events. This module
//! consumes a stream of metadata events, and keeps a database table
//! updated with the current NIP-05 verification status.
use crate::config::SETTINGS;
use crate::config::VerifiedUsers;
use crate::db;
use crate::error::{Error, Result};
use crate::event::Event;
@@ -13,13 +13,13 @@ use hyper::body::HttpBody;
use hyper::client::connect::HttpConnector;
use hyper::Client;
use hyper_tls::HttpsConnector;
use log::*;
use rand::Rng;
use rusqlite::params;
use std::time::Duration;
use std::time::Instant;
use std::time::SystemTime;
use tokio::time::Interval;
use tracing::{debug, info, warn};
/// NIP-05 verifier state
pub struct Verifier {
@@ -31,6 +31,8 @@ pub struct Verifier {
read_pool: db::SqlitePool,
/// SQLite write query pool
write_pool: db::SqlitePool,
/// Settings
settings: crate::config::Settings,
/// HTTP client
client: hyper::Client<HttpsConnector<HttpConnector>, hyper::Body>,
/// After all accounts are updated, wait this long before checking again.
@@ -42,7 +44,7 @@ pub struct Verifier {
}
/// A NIP-05 identifier is a local part and domain.
#[derive(PartialEq, Debug, Clone)]
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct Nip05Name {
local: String,
domain: String,
@@ -138,11 +140,13 @@ impl Verifier {
pub fn new(
metadata_rx: tokio::sync::broadcast::Receiver<Event>,
event_tx: tokio::sync::broadcast::Sender<Event>,
settings: crate::config::Settings,
) -> Result<Self> {
info!("creating NIP-05 verifier");
// build a database connection for reading and writing.
let write_pool = db::build_pool(
"nip05 writer",
&settings,
rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE,
1, // min conns
4, // max conns
@@ -150,6 +154,7 @@ impl Verifier {
);
let read_pool = db::build_pool(
"nip05 reader",
&settings,
rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY,
1, // min conns
8, // max conns
@@ -174,6 +179,7 @@ impl Verifier {
event_tx,
read_pool,
write_pool,
settings,
client,
wait_after_finish,
http_wait_duration,
@@ -214,7 +220,11 @@ impl Verifier {
pubkey: &str,
) -> Result<UserWebVerificationStatus> {
// determine if this domain should be checked
if !is_domain_allowed(&nip.domain) {
if !is_domain_allowed(
&nip.domain,
&self.settings.verified_users.domain_whitelist,
&self.settings.verified_users.domain_blacklist,
) {
return Ok(UserWebVerificationStatus::DomainNotAllowed);
}
let url = nip
@@ -239,9 +249,9 @@ impl Verifier {
// HTTP request with timeout
match tokio::time::timeout(Duration::from_secs(5), response_fut).await {
Ok(response_res) => {
let response = response_res?;
// limit size of verification document to 1MB.
const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024 * 1024;
let response = response_res?;
// determine content length from response
let response_content_length = match response.body().size_hint().upper() {
Some(v) => v,
@@ -257,13 +267,12 @@ impl Verifier {
let body_matches = body_contains_user(&nip.local, pubkey, body_bytes)?;
if body_matches {
return Ok(UserWebVerificationStatus::Verified);
} else {
}
// successful response, parsed as a nip-05
// document, but this name/pubkey was not
// present.
return Ok(UserWebVerificationStatus::Unverified);
}
}
} else {
info!(
"content length missing or exceeded limits for account: {:?}",
@@ -347,15 +356,11 @@ impl Verifier {
/// Reverify the oldest user verification record.
async fn do_reverify(&mut self) -> Result<()> {
let reverify_setting;
let max_failures;
{
// this block prevents a read handle to settings being
// captured by the async DB call (guard is not Send)
let settings = SETTINGS.read().unwrap();
reverify_setting = settings.verified_users.verify_update_frequency_duration;
max_failures = settings.verified_users.max_consecutive_failures;
}
let reverify_setting = self
.settings
.verified_users
.verify_update_frequency_duration;
let max_failures = self.settings.verified_users.max_consecutive_failures;
// get from settings, but default to 6hrs between re-checking an account
let reverify_dur = reverify_setting.unwrap_or_else(|| Duration::from_secs(60 * 60 * 6));
// find all verification records that have success or failure OLDER than the reverify_dur.
@@ -506,17 +511,13 @@ impl Verifier {
let start = Instant::now();
// we should only do this if we are enabled. if we are
// disabled/passive, the event has already been persisted.
let should_write_event;
{
let settings = SETTINGS.read().unwrap();
should_write_event = settings.verified_users.is_enabled()
}
let should_write_event = self.settings.verified_users.is_enabled();
if should_write_event {
match db::write_event(&mut self.write_pool.get()?, event) {
Ok(updated) => {
if updated != 0 {
info!(
"persisted event: {:?} in {:?}",
"persisted event (new verified pubkey): {:?} in {:?}",
event.get_event_id_prefix(),
start.elapsed()
);
@@ -538,7 +539,7 @@ impl Verifier {
}
/// Result of checking user's verification status against DNS/HTTP.
#[derive(PartialEq, Debug, Clone)]
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum UserWebVerificationStatus {
Verified, // user is verified, as of now.
DomainNotAllowed, // domain blacklist or whitelist denied us from attempting a verification
@@ -547,7 +548,7 @@ pub enum UserWebVerificationStatus {
}
/// A NIP-05 verification record.
#[derive(PartialEq, Debug, Clone)]
#[derive(PartialEq, Eq, Debug, Clone)]
// Basic information for a verification event. Gives us all we need to assert a NIP-05 address is good.
pub struct VerificationRecord {
pub rowid: u64, // database row for this verification event
@@ -562,15 +563,18 @@ pub struct VerificationRecord {
/// Check with settings to determine if a given domain is allowed to
/// publish.
pub fn is_domain_allowed(domain: &str) -> bool {
let settings = SETTINGS.read().unwrap();
pub fn is_domain_allowed(
domain: &str,
whitelist: &Option<Vec<String>>,
blacklist: &Option<Vec<String>>,
) -> bool {
// if there is a whitelist, domain must be present in it.
if let Some(wl) = &settings.verified_users.domain_whitelist {
if let Some(wl) = whitelist {
// workaround for Vec contains not accepting &str
return wl.iter().any(|x| x == domain);
}
// otherwise, check that user is not in the blacklist
if let Some(bl) = &settings.verified_users.domain_blacklist {
if let Some(bl) = blacklist {
return !bl.iter().any(|x| x == domain);
}
true
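The refactored function can now be exercised without global settings; for example:

// with a whitelist, only listed domains are allowed
let wl = Some(vec!["example.com".to_owned()]);
assert!(is_domain_allowed("example.com", &wl, &None));
assert!(!is_domain_allowed("nostr.example", &wl, &None));
// with only a blacklist, anything not listed is allowed
let bl = Some(vec!["spam.example".to_owned()]);
assert!(!is_domain_allowed("spam.example", &None, &bl));
assert!(is_domain_allowed("example.com", &None, &bl));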
@@ -579,17 +583,21 @@ pub fn is_domain_allowed(domain: &str) -> bool {
impl VerificationRecord {
/// Check if the record is recent enough to be considered valid,
/// and the domain is allowed.
pub fn is_valid(&self) -> bool {
let settings = SETTINGS.read().unwrap();
pub fn is_valid(&self, verified_users_settings: &VerifiedUsers) -> bool {
//let settings = SETTINGS.read().unwrap();
// how long a verification record is good for
let nip05_expiration = &settings.verified_users.verify_expiration_duration;
let nip05_expiration = &verified_users_settings.verify_expiration_duration;
if let Some(e) = nip05_expiration {
if !self.is_current(e) {
return false;
}
}
// check domains
is_domain_allowed(&self.name.domain)
is_domain_allowed(
&self.name.domain,
&verified_users_settings.domain_whitelist,
&verified_users_settings.domain_blacklist,
)
}
/// Check if this record has been validated since the given
@@ -705,9 +713,7 @@ pub async fn get_oldest_user_verification(
conn: db::PooledConnection,
earliest: u64,
) -> Result<VerificationRecord> {
let res =
tokio::task::spawn_blocking(move || query_oldest_user_verification(conn, earliest)).await?;
res
tokio::task::spawn_blocking(move || query_oldest_user_verification(conn, earliest)).await?
}
pub fn query_oldest_user_verification(
@@ -715,7 +721,7 @@ pub fn query_oldest_user_verification(
earliest: u64,
) -> Result<VerificationRecord> {
let tx = conn.transaction()?;
let query = "SELECT v.id, v.name, e.event_hash, e.author, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v LEFT JOIN event e ON e.id=v.metadata_event WHERE (v.verified_at < ? OR v.verified_at IS NULL) AND (v.failed_at < ? OR v.failed_at IS NULL) ORDER BY v.verified_at ASC, v.failed_at ASC LIMIT 1;";
let query = "SELECT v.id, v.name, e.event_hash, e.author, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v INNER JOIN event e ON e.id=v.metadata_event WHERE (v.verified_at < ? OR v.verified_at IS NULL) AND (v.failed_at < ? OR v.failed_at IS NULL) ORDER BY v.verified_at ASC, v.failed_at ASC LIMIT 1;";
let mut stmt = tx.prepare_cached(query)?;
let fields = stmt.query_row(params![earliest, earliest], |r| {
let rowid: u64 = r.get(0)?;

src/notice.rs (new file, 86 lines)
View File

@@ -0,0 +1,86 @@
pub enum EventResultStatus {
Saved,
Duplicate,
Invalid,
Blocked,
RateLimited,
Error,
}
pub struct EventResult {
pub id: String,
pub msg: String,
pub status: EventResultStatus,
}
pub enum Notice {
Message(String),
EventResult(EventResult),
}
impl EventResultStatus {
pub fn to_bool(&self) -> bool {
match self {
Self::Saved => true,
Self::Duplicate => true,
Self::Invalid => false,
Self::Blocked => false,
Self::RateLimited => false,
Self::Error => false,
}
}
pub fn prefix(&self) -> &'static str {
match self {
Self::Saved => "saved",
Self::Duplicate => "duplicate",
Self::Invalid => "invalid",
Self::Blocked => "blocked",
Self::RateLimited => "rate-limited",
Self::Error => "error",
}
}
}
impl Notice {
//pub fn err(err: error::Error, id: String) -> Notice {
// Notice::err_msg(format!("{}", err), id)
//}
pub fn message(msg: String) -> Notice {
Notice::Message(msg)
}
fn prefixed(id: String, msg: &str, status: EventResultStatus) -> Notice {
let msg = format!("{}: {}", status.prefix(), msg);
Notice::EventResult(EventResult { id, msg, status })
}
pub fn invalid(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::Invalid)
}
pub fn blocked(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::Blocked)
}
pub fn rate_limited(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::RateLimited)
}
pub fn duplicate(id: String) -> Notice {
Notice::prefixed(id, "", EventResultStatus::Duplicate)
}
pub fn error(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::Error)
}
pub fn saved(id: String) -> Notice {
Notice::EventResult(EventResult {
id,
msg: "".into(),
status: EventResultStatus::Saved,
})
}
}
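A short usage sketch of the new Notice API (behavior follows directly from the code above):

// duplicate() produces a "duplicate: "-prefixed message with a truthy status
if let Notice::EventResult(res) = Notice::duplicate("abc123".to_owned()) {
    assert_eq!(res.msg, "duplicate: ");
    assert!(res.status.to_bool());
}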

View File

@@ -1,30 +1,37 @@
//! Database schema and migrations
use crate::db::PooledConnection;
use crate::error::Result;
use crate::utils::is_hex;
use log::*;
use crate::event::{single_char_tagname, Event};
use crate::utils::is_lower_hex;
use const_format::formatcp;
use rusqlite::limits::Limit;
use rusqlite::params;
use rusqlite::Connection;
// TODO: drop the pubkey_ref and event_ref tables
use std::cmp::Ordering;
use std::time::Instant;
use tracing::{debug, error, info};
/// Startup DB Pragmas
pub const STARTUP_SQL: &str = r##"
PRAGMA main.synchronous=NORMAL;
PRAGMA foreign_keys = ON;
pragma mmap_size = 536870912; -- 512MB of mmap
PRAGMA journal_size_limit=32768;
pragma mmap_size = 17179869184; -- cap mmap at 16GB
"##;
/// Latest database version
pub const DB_VERSION: usize = 11;
/// Schema definition
const INIT_SQL: &str = r##"
const INIT_SQL: &str = formatcp!(
r##"
-- Database settings
PRAGMA encoding = "UTF-8";
PRAGMA journal_mode=WAL;
PRAGMA main.synchronous=NORMAL;
PRAGMA foreign_keys = ON;
PRAGMA application_id = 1654008667;
PRAGMA user_version = 5;
PRAGMA user_version = {};
-- Event Table
CREATE TABLE IF NOT EXISTS event (
@@ -33,6 +40,7 @@ event_hash BLOB NOT NULL, -- 4-byte hash
first_seen INTEGER NOT NULL, -- when the event was first seen (not authored!) (seconds since 1970)
created_at INTEGER NOT NULL, -- when the event was authored
author BLOB NOT NULL, -- author pubkey
delegated_by BLOB, -- delegator pubkey (NIP-26)
kind INTEGER NOT NULL, -- event kind
hidden INTEGER, -- relevant for queries
content TEXT NOT NULL -- serialized json of event object
@@ -40,9 +48,10 @@ content TEXT NOT NULL -- serialized json of event object
-- Event Indexes
CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash);
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS author_index ON event(author);
CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
-- Tag Table
-- Tag values are stored as either a BLOB (if they come in as a
@@ -53,11 +62,13 @@ id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
name TEXT, -- the tag name ("p", "e", whatever)
value TEXT, -- the tag value, if not hex.
value_hex BLOB, -- the tag value, if it can be interpreted as a hex string.
value_hex BLOB, -- the tag value, if it can be interpreted as a lowercase hex string.
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
CREATE INDEX IF NOT EXISTS tag_val_hex_index ON tag(value_hex);
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value_hex,value);
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value_hex);
-- NIP-05 User Validation
CREATE TABLE IF NOT EXISTS user_verification (
@@ -71,19 +82,37 @@ FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CAS
);
CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name);
CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);
"##;
"##,
DB_VERSION
);
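The const_format::formatcp! macro interpolates constants at compile time, which keeps the schema text and DB_VERSION from drifting apart. A minimal, self-contained illustration:

use const_format::formatcp;

const DB_VERSION: usize = 11;
// evaluated at compile time; the result is a &'static str
const VERSION_PRAGMA: &str = formatcp!("PRAGMA user_version = {};", DB_VERSION);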
/// Determine the current application database schema version.
pub fn db_version(conn: &mut Connection) -> Result<usize> {
pub fn curr_db_version(conn: &mut Connection) -> Result<usize> {
let query = "PRAGMA user_version;";
let curr_version = conn.query_row(query, [], |row| row.get(0))?;
Ok(curr_version)
}
fn mig_init(conn: &mut PooledConnection) -> Result<usize> {
match conn.execute_batch(INIT_SQL) {
Ok(()) => {
info!(
"database pragma/schema initialized to v{}, and ready",
DB_VERSION
);
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be initialized");
}
}
Ok(DB_VERSION)
}
/// Upgrade DB to latest version, and execute pragma settings
pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
// check the version.
let mut curr_version = db_version(conn)?;
let mut curr_version = curr_db_version(conn)?;
info!("DB version = {:?}", curr_version);
debug!(
@@ -99,19 +128,79 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
(conn.limit(Limit::SQLITE_LIMIT_SQL_LENGTH) as f64 / (1024 * 1024) as f64).floor()
);
match curr_version.cmp(&DB_VERSION) {
// Database is new or not current
Ordering::Less => {
// initialize from scratch
if curr_version == 0 {
match conn.execute_batch(INIT_SQL) {
Ok(()) => {
info!("database pragma/schema initialized to v4, and ready");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be initialized");
}
}
curr_version = mig_init(conn)?;
}
// for initialized but out-of-date schemas, proceed to
// upgrade sequentially until we are current.
if curr_version == 1 {
curr_version = mig_1_to_2(conn)?;
}
if curr_version == 2 {
curr_version = mig_2_to_3(conn)?;
}
if curr_version == 3 {
curr_version = mig_3_to_4(conn)?;
}
if curr_version == 4 {
curr_version = mig_4_to_5(conn)?;
}
if curr_version == 5 {
curr_version = mig_5_to_6(conn)?;
}
if curr_version == 6 {
curr_version = mig_6_to_7(conn)?;
}
if curr_version == 7 {
curr_version = mig_7_to_8(conn)?;
}
if curr_version == 8 {
curr_version = mig_8_to_9(conn)?;
}
if curr_version == 9 {
curr_version = mig_9_to_10(conn)?;
}
if curr_version == 10 {
curr_version = mig_10_to_11(conn)?;
}
if curr_version == DB_VERSION {
info!(
"All migration scripts completed successfully. Welcome to v{}.",
DB_VERSION
);
}
}
// Database is current, all is good
Ordering::Equal => {
debug!("Database version was already current (v{})", DB_VERSION);
}
// Database is newer than what this code understands, abort
Ordering::Greater => {
panic!(
"Database version is newer than supported by this executable (v{} > v{})",
curr_version, DB_VERSION
);
}
}
// Setup PRAGMA
conn.execute_batch(STARTUP_SQL)?;
debug!("SQLite PRAGMA startup completed");
Ok(())
}
//// Migration Scripts
fn mig_1_to_2(conn: &mut PooledConnection) -> Result<usize> {
// the only change is adding a hidden column to events.
let upgrade_sql = r##"
ALTER TABLE event ADD hidden INTEGER;
@@ -121,15 +210,16 @@ PRAGMA user_version = 2;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v1 -> v2");
curr_version = 2;
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(2)
}
if curr_version == 2 {
fn mig_2_to_3(conn: &mut PooledConnection) -> Result<usize> {
// this version lacks the tag column
info!("database schema needs update from 2->3");
let upgrade_sql = r##"
@@ -147,14 +237,12 @@ PRAGMA user_version = 3;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v2 -> v3");
curr_version = 3;
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
info!("Starting transaction");
// iterate over every event/pubkey tag
let tx = conn.transaction()?;
{
@@ -166,7 +254,7 @@ PRAGMA user_version = 3;
let tag_name: String = row.get(1)?;
let tag_value: String = row.get(2)?;
// this will leave behind p/e tags that were non-hex, but they are invalid anyways.
if is_hex(&tag_value) {
if is_lower_hex(&tag_value) {
tx.execute(
"INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
params![event_id, tag_name, hex::decode(&tag_value).ok()],
@@ -174,10 +262,12 @@ PRAGMA user_version = 3;
}
}
}
info!("Updated tag values");
tx.commit()?;
info!("Upgrade complete");
Ok(3)
}
if curr_version == 3 {
fn mig_3_to_4(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 3->4");
let upgrade_sql = r##"
-- incoming metadata events with nip05
@@ -197,16 +287,16 @@ PRAGMA user_version = 4;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v3 -> v4");
curr_version = 4;
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(4)
}
if curr_version == 4 {
fn mig_4_to_5(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 4->5");
let upgrade_sql = r##"
DROP TABLE IF EXISTS event_ref;
@@ -216,22 +306,166 @@ PRAGMA user_version=5;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v4 -> v5");
// uncomment if we have a newer version
//curr_version = 5;
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
} else if curr_version == 5 {
debug!("Database version was already current");
} else if curr_version > 5 {
panic!("Database version is newer than supported by this executable");
Ok(5)
}
// Setup PRAGMA
conn.execute_batch(STARTUP_SQL)?;
debug!("SQLite PRAGMA startup completed");
Ok(())
fn mig_5_to_6(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 5->6");
// We need to rebuild the tags table: iterate through the
// event table, build each event from its JSON, and insert
// tags into a fresh tag table. This was needed due to a
// logic error in how hex-like tags were indexed.
let start = Instant::now();
let tx = conn.transaction()?;
{
// Clear out table
tx.execute("DELETE FROM tag;", [])?;
let mut stmt = tx.prepare("select id, content from event order by id;")?;
let mut tag_rows = stmt.query([])?;
while let Some(row) = tag_rows.next()? {
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
let event_id: u64 = row.get(0)?;
let event_json: String = row.get(1)?;
let event: Event = serde_json::from_str(&event_json)?;
// look at each event, and each tag, creating new tag entries if appropriate.
for t in event.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.get(0).unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
}
// safe because len was > 1
let tagval = t.get(1).unwrap();
// insert as BLOB if we can restore it losslessly.
// this means it needs to be even length and lowercase.
if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
tx.execute(
"INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
params![event_id, tagname, hex::decode(tagval).ok()],
)?;
} else {
// otherwise, insert as text
tx.execute(
"INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
params![event_id, tagname, &tagval],
)?;
}
}
}
tx.execute("PRAGMA user_version = 6;", [])?;
}
tx.commit()?;
info!("database schema upgraded v5 -> v6 in {:?}", start.elapsed());
// vacuum after large table modification
let start = Instant::now();
conn.execute("VACUUM;", [])?;
info!("vacuumed DB after tags rebuild in {:?}", start.elapsed());
Ok(6)
}
fn mig_6_to_7(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 6->7");
// the only change is adding a delegated_by column and index to events.
let upgrade_sql = r##"
ALTER TABLE event ADD delegated_by BLOB;
CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
PRAGMA user_version = 7;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v6 -> v7");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(7)
}
fn mig_7_to_8(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 7->8");
// Remove redundant indexes, and add a better multi-column index.
let upgrade_sql = r##"
DROP INDEX IF EXISTS created_at_index;
DROP INDEX IF EXISTS kind_index;
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
PRAGMA user_version = 8;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v7 -> v8");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(8)
}
fn mig_8_to_9(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 8->9");
// Those old indexes were actually helpful...
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
PRAGMA user_version = 9;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v8 -> v9");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(9)
}
fn mig_9_to_10(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 9->10");
// Add a composite index on the tag table to speed up lookups.
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value_hex,value);
PRAGMA user_version = 10;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v9 -> v10");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(10)
}
fn mig_10_to_11(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 10->11");
// Add a name-first tag index, then reindex and run optimize.
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value_hex);
reindex;
pragma optimize;
PRAGMA user_version = 11;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v10 -> v11");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(11)
}
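Future migrations follow the same template. A hypothetical next step (mig_11_to_12 is not part of this change, and it would also need to be chained into upgrade_db) might look like:

fn mig_11_to_12(conn: &mut PooledConnection) -> Result<usize> {
    info!("database schema needs update from 11->12");
    let upgrade_sql = r##"
-- v12 schema changes would go here
PRAGMA user_version = 12;
"##;
    match conn.execute_batch(upgrade_sql) {
        Ok(()) => {
            info!("database schema upgraded v11 -> v12");
        }
        Err(err) => {
            error!("update failed: {}", err);
            panic!("database could not be upgraded");
        }
    }
    Ok(12)
}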

src/server.rs (new file, 719 lines)
View File

@@ -0,0 +1,719 @@
//! Server process
use crate::close::Close;
use crate::close::CloseCmd;
use crate::config::{Settings, VerifiedUsersMode};
use crate::conn;
use crate::db;
use crate::db::SubmittedEvent;
use crate::error::{Error, Result};
use crate::event::Event;
use crate::event::EventCmd;
use crate::info::RelayInfo;
use crate::nip05;
use crate::notice::Notice;
use crate::subscription::Subscription;
use futures::SinkExt;
use futures::StreamExt;
use governor::{Jitter, Quota, RateLimiter};
use http::header::HeaderMap;
use hyper::header::ACCEPT;
use hyper::service::{make_service_fn, service_fn};
use hyper::upgrade::Upgraded;
use hyper::{
header, server::conn::AddrStream, upgrade, Body, Request, Response, Server, StatusCode,
};
use rusqlite::OpenFlags;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::collections::HashMap;
use std::convert::Infallible;
use std::net::SocketAddr;
use std::path::Path;
use std::sync::mpsc::Receiver as MpscReceiver;
use std::time::Duration;
use std::time::Instant;
use tokio::runtime::Builder;
use tokio::sync::broadcast::{self, Receiver, Sender};
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio_tungstenite::WebSocketStream;
use tracing::*;
use tungstenite::error::CapacityError::MessageTooLong;
use tungstenite::error::Error as WsError;
use tungstenite::handshake;
use tungstenite::protocol::Message;
use tungstenite::protocol::WebSocketConfig;
/// Handle arbitrary HTTP requests, including for WebSocket upgrades.
async fn handle_web_request(
mut request: Request<Body>,
pool: db::SqlitePool,
settings: Settings,
remote_addr: SocketAddr,
broadcast: Sender<Event>,
event_tx: tokio::sync::mpsc::Sender<SubmittedEvent>,
shutdown: Receiver<()>,
) -> Result<Response<Body>, Infallible> {
match (
request.uri().path(),
request.headers().contains_key(header::UPGRADE),
) {
// Request for / as websocket
("/", true) => {
trace!("websocket with upgrade request");
//assume request is a handshake, so create the handshake response
let response = match handshake::server::create_response_with_body(&request, || {
Body::empty()
}) {
Ok(response) => {
//in case the handshake response creation succeeds,
//spawn a task to handle the websocket connection
tokio::spawn(async move {
//using the hyper feature of upgrading a connection
match upgrade::on(&mut request).await {
//if successfully upgraded
Ok(upgraded) => {
// set WebSocket configuration options
let config = WebSocketConfig {
max_message_size: settings.limits.max_ws_message_bytes,
max_frame_size: settings.limits.max_ws_frame_bytes,
..Default::default()
};
//create a websocket stream from the upgraded object
let ws_stream = WebSocketStream::from_raw_socket(
//pass the upgraded object
//as the base layer stream of the Websocket
upgraded,
tokio_tungstenite::tungstenite::protocol::Role::Server,
Some(config),
)
.await;
let user_agent = get_header_string("user-agent", request.headers());
// determine the remote IP from headers if they exist
let header_ip = settings
.network
.remote_ip_header
.as_ref()
.and_then(|x| get_header_string(x, request.headers()));
// use the socket addr as a backup
let remote_ip =
header_ip.unwrap_or_else(|| remote_addr.ip().to_string());
let client_info = ClientInfo {
remote_ip,
user_agent,
};
// spawn a nostr server with our websocket
tokio::spawn(nostr_server(
pool,
client_info,
settings,
ws_stream,
broadcast,
event_tx,
shutdown,
));
}
// todo: trace, don't print...
Err(e) => println!(
"error when trying to upgrade connection \
from address {} to websocket connection. \
Error is: {}",
remote_addr, e
),
}
});
//return the response to the handshake request
response
}
Err(error) => {
warn!("websocket response failed");
let mut res =
Response::new(Body::from(format!("Failed to create websocket: {}", error)));
*res.status_mut() = StatusCode::BAD_REQUEST;
return Ok(res);
}
};
Ok::<_, Infallible>(response)
}
// Request for Relay info
("/", false) => {
// handle request at root with no upgrade header
// Check if this is a nostr server info request
let accept_header = &request.headers().get(ACCEPT);
// check if application/nostr+json is included
if let Some(media_types) = accept_header {
if let Ok(mt_str) = media_types.to_str() {
if mt_str.contains("application/nostr+json") {
// build a relay info response
debug!("Responding to server info request");
let rinfo = RelayInfo::from(settings.info);
let b = Body::from(serde_json::to_string_pretty(&rinfo).unwrap());
return Ok(Response::builder()
.status(200)
.header("Content-Type", "application/nostr+json")
.header("Access-Control-Allow-Origin", "*")
.body(b)
.unwrap());
}
}
}
Ok(Response::builder()
.status(200)
.header("Content-Type", "text/plain")
.body(Body::from("Please use a Nostr client to connect."))
.unwrap())
}
(_, _) => {
//handle any other url
Ok(Response::builder()
.status(StatusCode::NOT_FOUND)
.body(Body::from("Nothing here."))
.unwrap())
}
}
}
fn get_header_string(header: &str, headers: &HeaderMap) -> Option<String> {
headers
.get(header)
.and_then(|x| x.to_str().ok().map(|x| x.to_string()))
}
// return on a control-c or internally requested shutdown signal
async fn ctrl_c_or_signal(mut shutdown_signal: Receiver<()>) {
let mut term_signal = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
.expect("could not define signal");
loop {
tokio::select! {
_ = shutdown_signal.recv() => {
info!("Shutting down webserver as requested");
// server shutting down, exit loop
break;
},
_ = tokio::signal::ctrl_c() => {
info!("Shutting down webserver due to SIGINT");
break;
},
_ = term_signal.recv() => {
info!("Shutting down webserver due to SIGTERM");
break;
},
}
}
}
/// Start running a Nostr relay server.
pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result<(), Error> {
trace!("Config: {:?}", settings);
// do some config validation.
if !Path::new(&settings.database.data_directory).is_dir() {
error!("Database directory does not exist");
return Err(Error::DatabaseDirError);
}
let addr = format!(
"{}:{}",
settings.network.address.trim(),
settings.network.port
);
let socket_addr = addr.parse().expect("listening address not valid");
// address whitelisting settings
if let Some(addr_whitelist) = &settings.authorization.pubkey_whitelist {
info!(
"Event publishing restricted to {} pubkey(s)",
addr_whitelist.len()
);
}
// check if NIP-05 enforced user verification is on
if settings.verified_users.is_active() {
info!(
"NIP-05 user verification mode:{:?}",
settings.verified_users.mode
);
if let Some(d) = settings.verified_users.verify_update_duration() {
info!("NIP-05 check user verification every: {:?}", d);
}
if let Some(d) = settings.verified_users.verify_expiration_duration() {
info!("NIP-05 user verification expires after: {:?}", d);
}
if let Some(wl) = &settings.verified_users.domain_whitelist {
info!("NIP-05 domain whitelist: {:?}", wl);
}
if let Some(bl) = &settings.verified_users.domain_blacklist {
info!("NIP-05 domain blacklist: {:?}", bl);
}
}
// configure tokio runtime
let rt = Builder::new_multi_thread()
.enable_all()
.thread_name("tokio-ws")
// limit concurrent SQLite blocking threads
.max_blocking_threads(settings.limits.max_blocking_threads)
.on_thread_start(|| {
trace!("started new thread");
})
.on_thread_stop(|| {
trace!("stopping thread");
})
.build()
.unwrap();
// start tokio
rt.block_on(async {
let broadcast_buffer_limit = settings.limits.broadcast_buffer;
let persist_buffer_limit = settings.limits.event_persist_buffer;
let verified_users_active = settings.verified_users.is_active();
let db_min_conn = settings.database.min_conn;
let db_max_conn = settings.database.max_conn;
let settings = settings.clone();
info!("listening on: {}", socket_addr);
// all client-submitted valid events are broadcast to every
// other client on this channel. This should be large enough
// to accommodate slower readers (messages are dropped if
// clients cannot keep up).
let (bcast_tx, _) = broadcast::channel::<Event>(broadcast_buffer_limit);
// validated events that need to be persisted are sent to the
// database via this channel.
let (event_tx, event_rx) = mpsc::channel::<SubmittedEvent>(persist_buffer_limit);
// establish a channel for letting all threads know about a
// requested server shutdown.
let (invoke_shutdown, shutdown_listen) = broadcast::channel::<()>(1);
// create a channel for sending any new metadata event. These
// will get processed relatively slowly (a potentially
// multi-second blocking HTTP call) on a single thread, so we
// buffer requests on the channel. No harm in dropping events
// here, since we are protecting against DoS. This can make
// it difficult to set up initial metadata in bulk, since
// overwhelming this will drop events and won't register
// metadata events.
let (metadata_tx, metadata_rx) = broadcast::channel::<Event>(4096);
// start the database writer thread. Give it a channel for
// writing events, and for publishing events that have been
// written (to all connected clients).
db::db_writer(
settings.clone(),
event_rx,
bcast_tx.clone(),
metadata_tx.clone(),
shutdown_listen,
)
.await;
info!("db writer created");
// create a nip-05 verifier thread; if enabled.
if settings.verified_users.mode != VerifiedUsersMode::Disabled {
let verifier_opt =
nip05::Verifier::new(metadata_rx, bcast_tx.clone(), settings.clone());
if let Ok(mut v) = verifier_opt {
if verified_users_active {
tokio::task::spawn(async move {
info!("starting up NIP-05 verifier...");
v.run().await;
});
}
}
}
// build a connection pool for DB maintenance
let maintenance_pool = db::build_pool(
"maintenance writer",
&settings,
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
1,
1,
false,
);
db::db_maintenance(maintenance_pool).await;
// listen for (external to tokio) shutdown request
let controlled_shutdown = invoke_shutdown.clone();
tokio::spawn(async move {
info!("control message listener started");
match shutdown_rx.recv() {
Ok(()) => {
info!("control message requesting shutdown");
controlled_shutdown.send(()).ok();
}
Err(std::sync::mpsc::RecvError) => {
// FIXME: spurious error on startup?
debug!("shutdown requestor is disconnected");
}
};
});
// listen for ctrl-c interrupts
let ctrl_c_shutdown = invoke_shutdown.clone();
// listener for webserver shutdown
let webserver_shutdown_listen = invoke_shutdown.subscribe();
tokio::spawn(async move {
tokio::signal::ctrl_c().await.unwrap();
info!("shutting down due to SIGINT (main)");
ctrl_c_shutdown.send(()).ok();
});
// build a connection pool for sqlite connections
let pool = db::build_pool(
"client query",
&settings,
rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY,
db_min_conn,
db_max_conn,
true,
);
// A `Service` is needed for every connection, so this
// creates one from our `handle_request` function.
let make_svc = make_service_fn(|conn: &AddrStream| {
let svc_pool = pool.clone();
let remote_addr = conn.remote_addr();
let bcast = bcast_tx.clone();
let event = event_tx.clone();
let stop = invoke_shutdown.clone();
let settings = settings.clone();
async move {
// service_fn converts our function into a `Service`
Ok::<_, Infallible>(service_fn(move |request: Request<Body>| {
handle_web_request(
request,
svc_pool.clone(),
settings.clone(),
remote_addr,
bcast.clone(),
event.clone(),
stop.subscribe(),
)
}))
}
});
let server = Server::bind(&socket_addr)
.serve(make_svc)
.with_graceful_shutdown(ctrl_c_or_signal(webserver_shutdown_listen));
// run hyper in this thread; this is why the thread does not return until shutdown.
if let Err(e) = server.await {
eprintln!("server error: {}", e);
}
});
Ok(())
}
/// Nostr protocol messages from a client
#[derive(Deserialize, Serialize, Clone, PartialEq, Eq, Debug)]
#[serde(untagged)]
pub enum NostrMessage {
/// An `EVENT` message
EventMsg(EventCmd),
/// A `REQ` message
SubMsg(Subscription),
/// A `CLOSE` message
CloseMsg(CloseCmd),
}
/// Convert Message to NostrMessage
fn convert_to_msg(msg: String, max_bytes: Option<usize>) -> Result<NostrMessage> {
let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
match parsed_res {
Ok(m) => {
if let NostrMessage::EventMsg(_) = m {
if let Some(max_size) = max_bytes {
// check length, ensure that some max size is set.
if msg.len() > max_size && max_size > 0 {
return Err(Error::EventMaxLengthError(msg.len()));
}
}
}
Ok(m)
}
Err(e) => {
debug!("proto parse error: {:?}", e);
debug!("parse error on message: {}", msg.trim());
Err(Error::ProtoParseError)
}
}
}
/// Turn a string into a NOTICE message ready to send over a WebSocket
fn make_notice_message(notice: Notice) -> Message {
let json = match notice {
Notice::Message(ref msg) => json!(["NOTICE", msg]),
Notice::EventResult(ref res) => json!(["OK", res.id, res.status.to_bool(), res.msg]),
};
Message::text(json.to_string())
}
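For example, a saved-event notice serializes to a NIP-20 style OK message:

// Notice::saved yields ["OK","<id>",true,""]
let msg = make_notice_message(Notice::saved("abc".into()));
assert_eq!(msg.to_text().unwrap(), r#"["OK","abc",true,""]"#);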
struct ClientInfo {
remote_ip: String,
user_agent: Option<String>,
}
/// Handle new client connections. This runs through an event loop
/// for all client communication.
async fn nostr_server(
pool: db::SqlitePool,
client_info: ClientInfo,
settings: Settings,
mut ws_stream: WebSocketStream<Upgraded>,
broadcast: Sender<Event>,
event_tx: mpsc::Sender<SubmittedEvent>,
mut shutdown: Receiver<()>,
) {
// the time this websocket nostr server started
let orig_start = Instant::now();
// get a broadcast channel for clients to communicate on
let mut bcast_rx = broadcast.subscribe();
// Track internal client state
let mut conn = conn::ClientConn::new(client_info.remote_ip);
// subscription creation rate limiting
let mut sub_lim_opt = None;
// 100ms jitter when the rate limiter returns
let jitter = Jitter::up_to(Duration::from_millis(100));
let sub_per_min_setting = settings.limits.subscriptions_per_min;
if let Some(sub_per_min) = sub_per_min_setting {
if sub_per_min > 0 {
trace!("Rate limits for sub creation ({}/min)", sub_per_min);
let quota_time = core::num::NonZeroU32::new(sub_per_min).unwrap();
let quota = Quota::per_minute(quota_time);
sub_lim_opt = Some(RateLimiter::direct(quota));
}
}
// Use the remote IP as the client identifier
let cid = conn.get_client_prefix();
// Create a channel for receiving query results from the database.
// we will send out the tx handle to any query we generate.
// this has capacity for some of the larger requests we see, which
// should allow the DB thread to release the handle earlier.
let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(20000);
// Create channel for receiving NOTICEs
let (notice_tx, mut notice_rx) = mpsc::channel::<Notice>(128);
// last time this client sent data (message, ping, etc.)
let mut last_message_time = Instant::now();
// ping interval (length configured in settings)
let default_ping_dur = Duration::from_secs(settings.network.ping_interval_seconds.into());
// disconnect after 20 minutes without a ping response or event.
let max_quiet_time = Duration::from_secs(60 * 20);
let start = tokio::time::Instant::now() + default_ping_dur;
let mut ping_interval = tokio::time::interval_at(start, default_ping_dur);
// maintain a hashmap of oneshot channels, one per active subscription.
// when a subscription is cancelled, a message is made available to the
// executing query so it knows to stop.
let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
// for stats, keep track of how many events the client published,
// and how many it received from queries.
let mut client_published_event_count: usize = 0;
let mut client_received_event_count: usize = 0;
debug!("new client connection (cid: {}, ip: {:?})", cid, conn.ip());
if let Some(ua) = client_info.user_agent {
debug!("cid: {}, user-agent: {:?}", cid, ua);
}
loop {
tokio::select! {
_ = shutdown.recv() => {
info!("Close connection down due to shutdown, client: {}, ip: {:?}, connected: {:?}", cid, conn.ip(), orig_start.elapsed());
// server shutting down, exit loop
break;
},
_ = ping_interval.tick() => {
// check how long since we talked to client
// if it has been too long, disconnect
if last_message_time.elapsed() > max_quiet_time {
debug!("ending connection due to lack of client ping response");
break;
}
// Send a ping
ws_stream.send(Message::Ping(Vec::new())).await.ok();
},
Some(notice_msg) = notice_rx.recv() => {
ws_stream.send(make_notice_message(notice_msg)).await.ok();
},
Some(query_result) = query_rx.recv() => {
// database informed us of a query result we asked for
let subesc = query_result.sub_id.replace('"', "");
if query_result.event == "EOSE" {
let send_str = format!("[\"EOSE\",\"{}\"]", subesc);
ws_stream.send(Message::Text(send_str)).await.ok();
} else {
client_received_event_count += 1;
// send a result
let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
ws_stream.send(Message::Text(send_str)).await.ok();
}
},
// TODO: consider logging the LaggedRecv error
Ok(global_event) = bcast_rx.recv() => {
// an event has been broadcast to all clients
// first check if there is a subscription for this event.
for (s, sub) in conn.subscriptions() {
if !sub.interested_in_event(&global_event) {
continue;
}
// TODO: serialize at broadcast time, instead of
// once for each consumer.
if let Ok(event_str) = serde_json::to_string(&global_event) {
trace!("sub match for client: {}, sub: {:?}, event: {:?}",
cid, s,
global_event.get_event_id_prefix());
// create an event response and send it
let subesc = s.replace('"', "");
ws_stream.send(Message::Text(format!("[\"EVENT\",\"{}\",{}]", subesc, event_str))).await.ok();
} else {
warn!("could not serialize event: {:?}", global_event.get_event_id_prefix());
}
}
},
ws_next = ws_stream.next() => {
// update most recent message time for client
last_message_time = Instant::now();
// Consume text messages from the client, parse into Nostr messages.
let nostr_msg = match ws_next {
Some(Ok(Message::Text(m))) => {
convert_to_msg(m,settings.limits.max_event_bytes)
},
Some(Ok(Message::Binary(_))) => {
ws_stream.send(
make_notice_message(Notice::message("binary messages are not accepted".into()))).await.ok();
continue;
},
Some(Ok(Message::Ping(_) | Message::Pong(_))) => {
// get a ping/pong, ignore. tungstenite will
// send responses automatically.
continue;
},
Some(Err(WsError::Capacity(MessageTooLong{size, max_size}))) => {
ws_stream.send(
make_notice_message(Notice::message(format!("message too large ({} > {})",size, max_size)))).await.ok();
continue;
},
None |
Some(Ok(Message::Close(_)) |
Err(WsError::AlreadyClosed | WsError::ConnectionClosed |
WsError::Protocol(tungstenite::error::ProtocolError::ResetWithoutClosingHandshake)))
=> {
debug!("websocket close from client (cid: {}, ip: {:?})",cid, conn.ip());
break;
},
Some(Err(WsError::Io(e))) => {
// IO errors are considered fatal
warn!("IO error (cid: {}, ip: {:?}): {:?}", cid, conn.ip(), e);
break;
}
x => {
// default condition on error is to close the client connection
info!("unknown error (cid: {}, ip: {:?}): {:?} (closing conn)", cid, conn.ip(), x);
break;
}
};
// handle the parsed Nostr protocol message
match nostr_msg {
Ok(NostrMessage::EventMsg(ec)) => {
// An EventCmd must be validated before it can be converted into an Event
let evid = ec.event_id().to_owned();
let parsed : Result<Event> = Result::<Event>::from(ec);
match parsed {
Ok(e) => {
let id_prefix:String = e.id.chars().take(8).collect();
debug!("successfully parsed/validated event: {:?} (cid: {})", id_prefix, cid);
// check if the event is too far in the future.
if e.is_valid_timestamp(settings.options.reject_future_seconds) {
// Write this to the database.
let submit_event = SubmittedEvent { event: e.clone(), notice_tx: notice_tx.clone() };
event_tx.send(submit_event).await.ok();
client_published_event_count += 1;
} else {
info!("client: {} sent a far future-dated event", cid);
if let Some(fut_sec) = settings.options.reject_future_seconds {
let msg = format!("The event created_at field is out of the acceptable range (+{}sec) for this relay.",fut_sec);
let notice = Notice::invalid(e.id, &msg);
ws_stream.send(make_notice_message(notice)).await.ok();
}
}
},
Err(e) => {
info!("client sent an invalid event (cid: {})", cid);
ws_stream.send(make_notice_message(Notice::invalid(evid, &format!("{}", e)))).await.ok();
}
}
},
Ok(NostrMessage::SubMsg(s)) => {
debug!("subscription requested (cid: {}, sub: {:?})", cid, s.id);
// subscription handling consists of:
// * check for rate limits
// * registering the subscription so future events can be matched
// * making a channel to cancel the request later
// * sending a request for a SQL query
// Do nothing if the sub already exists.
if !conn.has_subscription(&s) {
if let Some(ref lim) = sub_lim_opt {
lim.until_ready_with_jitter(jitter).await;
}
let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
match conn.subscribe(s.clone()) {
Ok(()) => {
// when we insert, if there was a previous query running with the same name, cancel it.
if let Some(previous_query) = running_queries.insert(s.id.to_owned(), abandon_query_tx) {
previous_query.send(()).ok();
}
// start a database query. this spawns a blocking database query on a worker thread.
db::db_query(s, cid.to_owned(), pool.clone(), query_tx.clone(), abandon_query_rx).await;
},
Err(e) => {
info!("Subscription error: {} (cid: {}, sub: {:?})", e, cid, s.id);
ws_stream.send(make_notice_message(Notice::message(format!("Subscription error: {}", e)))).await.ok();
}
}
} else {
info!("client sent duplicate subscription, ignoring (cid: {}, sub: {:?})", cid, s.id);
}
},
Ok(NostrMessage::CloseMsg(cc)) => {
// closing a request simply removes the subscription.
let parsed : Result<Close> = Result::<Close>::from(cc);
if let Ok(c) = parsed {
// check if a query is currently
// running, and remove it if so.
let stop_tx = running_queries.remove(&c.id);
if let Some(tx) = stop_tx {
tx.send(()).ok();
}
// stop checking new events against
// the subscription
conn.unsubscribe(&c);
} else {
info!("invalid command ignored");
ws_stream.send(make_notice_message(Notice::message("could not parse command".into()))).await.ok();
}
},
Err(Error::ConnError) => {
debug!("got connection close/error, disconnecting cid: {}, ip: {:?}",cid, conn.ip());
break;
}
Err(Error::EventMaxLengthError(s)) => {
info!("client sent event larger ({} bytes) than max size (cid: {})", s, cid);
ws_stream.send(make_notice_message(Notice::message("event exceeded max size".into()))).await.ok();
},
Err(Error::ProtoParseError) => {
info!("client sent event that could not be parsed (cid: {})", cid);
ws_stream.send(make_notice_message(Notice::message("could not parse command".into()))).await.ok();
},
Err(e) => {
info!("got non-fatal error from client (cid: {}, error: {:?}", cid, e);
},
}
},
}
}
// connection cleanup - ensure any still running queries are terminated.
for (_, stop_tx) in running_queries {
stop_tx.send(()).ok();
}
info!(
"stopping client connection (cid: {}, ip: {:?}, sent: {} events, recv: {} events, connected: {:?})",
cid,
conn.ip(),
client_published_event_count,
client_received_event_count,
orig_start.elapsed()
);
}
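The running_queries map above is the backbone of subscription cancellation: each spawned DB query holds the receiver half of a oneshot channel and checks it while streaming rows. A standalone sketch of the same pattern (the polling loop and names are illustrative, not db_query's internals):

use std::collections::HashMap;
use tokio::sync::oneshot;

#[tokio::main]
async fn main() {
    let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
    let (abandon_tx, mut abandon_rx) = oneshot::channel::<()>();
    running_queries.insert("sub1".to_owned(), abandon_tx);

    let query = tokio::spawn(async move {
        loop {
            // the real query polls this between result rows
            if abandon_rx.try_recv().is_ok() {
                println!("query abandoned");
                break;
            }
            tokio::task::yield_now().await; // stand-in for per-row work
        }
    });

    // a CLOSE, a replacement REQ with the same id, or connection
    // teardown fires the channel and the query stops early.
    if let Some(stop_tx) = running_queries.remove("sub1") {
        stop_tx.send(()).ok();
    }
    query.await.unwrap();
}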


@@ -8,7 +8,7 @@ use std::collections::HashMap;
use std::collections::HashSet;
/// Subscription identifier and set of request filters
#[derive(Serialize, PartialEq, Debug, Clone)]
#[derive(Serialize, PartialEq, Eq, Debug, Clone)]
pub struct Subscription {
pub id: String,
pub filters: Vec<ReqFilter>,
@@ -19,7 +19,7 @@ pub struct Subscription {
/// Corresponds to client-provided subscription request elements. Any
/// element can be present if it should be used in filtering, or
/// absent ([`None`]) if it should be ignored.
#[derive(Serialize, PartialEq, Debug, Clone)]
#[derive(Serialize, PartialEq, Eq, Debug, Clone)]
pub struct ReqFilter {
/// Event hashes
pub ids: Option<Vec<String>>,
@@ -31,9 +31,16 @@ pub struct ReqFilter {
pub until: Option<u64>,
/// List of author public keys
pub authors: Option<Vec<String>>,
/// Limit number of results
pub limit: Option<u64>,
/// Set of tags
#[serde(skip)]
pub tags: Option<HashMap<String, HashSet<String>>>,
pub tags: Option<HashMap<char, HashSet<String>>>,
/// Force no matches due to malformed data
// we can't represent the search in the req filter, and we don't want
// to erroneously match; this flag indicates the REQ attempted
// something invalid.
pub force_no_match: bool,
}
impl<'de> Deserialize<'de> for ReqFilter {
@@ -54,7 +61,9 @@ impl<'de> Deserialize<'de> for ReqFilter {
since: None,
until: None,
authors: None,
limit: None,
tags: None,
force_no_match: false,
};
let mut ts = None;
// iterate through each key, and assign values that exist
@@ -68,11 +77,12 @@ impl<'de> Deserialize<'de> for ReqFilter {
rf.since = Deserialize::deserialize(val).ok();
} else if key == "until" {
rf.until = Deserialize::deserialize(val).ok();
} else if key == "limit" {
rf.limit = Deserialize::deserialize(val).ok();
} else if key == "authors" {
rf.authors = Deserialize::deserialize(val).ok();
} else if key.starts_with('#') && key.len() > 1 && val.is_array() {
// remove the prefix
let tagname = &key[1..];
if let Some(tag_search) = tag_search_char_from_filter(key) {
if ts.is_none() {
// Initialize the tag if necessary
ts = Some(HashMap::new());
@@ -81,9 +91,14 @@ impl<'de> Deserialize<'de> for ReqFilter {
let tag_vals: Option<Vec<String>> = Deserialize::deserialize(val).ok();
if let Some(v) = tag_vals {
let hs = HashSet::from_iter(v.into_iter());
m.insert(tagname.to_owned(), hs);
m.insert(tag_search.to_owned(), hs);
}
};
} else {
// tag search that is multi-character, don't add to subscription
rf.force_no_match = true;
continue;
}
}
}
rf.tags = ts;
@@ -91,6 +106,26 @@ impl<'de> Deserialize<'de> for ReqFilter {
}
}
/// Attempt to form a single-char identifier from a tag search filter
fn tag_search_char_from_filter(tagname: &str) -> Option<char> {
let tagname_nohash = &tagname[1..];
// We return the tag character if and only if the tagname consists
// of a single char.
let mut tagnamechars = tagname_nohash.chars();
let firstchar = tagnamechars.next();
match firstchar {
Some(_) => {
// check second char
if tagnamechars.next().is_none() {
firstchar
} else {
None
}
}
None => None,
}
}
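A hypothetical unit test (not part of this change) pinning down that behavior:

#[test]
fn tag_search_char() {
    assert_eq!(tag_search_char_from_filter("#e"), Some('e')); // single-char: queryable
    assert_eq!(tag_search_char_from_filter("#ee"), None);     // multi-char: rejected
    assert_eq!(tag_search_char_from_filter("#"), None);       // empty name: rejected
}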
impl<'de> Deserialize<'de> for Subscription {
/// Custom deserializer for subscriptions, which have a more
/// complex structure than the other message types.
@@ -185,11 +220,22 @@ impl ReqFilter {
.unwrap_or(true)
}
fn delegated_authors_match(&self, event: &Event) -> bool {
if let Some(delegated_pubkey) = &event.delegated_by {
self.authors
.as_ref()
.map(|vs| prefix_match(vs, delegated_pubkey))
.unwrap_or(true)
} else {
false
}
}
fn tag_match(&self, event: &Event) -> bool {
// get the hashset from the filter.
if let Some(map) = &self.tags {
for (key, val) in map.iter() {
let tag_match = event.generic_tag_val_intersect(key, val);
let tag_match = event.generic_tag_val_intersect(*key, val);
// if there is no match for this tag, the match fails.
if !tag_match {
return false;
@@ -214,9 +260,11 @@ impl ReqFilter {
// self.id.as_ref().map(|v| v == &event.id).unwrap_or(true)
self.ids_match(event)
&& self.since.map(|t| event.created_at > t).unwrap_or(true)
&& self.until.map(|t| event.created_at < t).unwrap_or(true)
&& self.kind_match(event.kind)
&& self.authors_match(event)
&& (self.authors_match(event) || self.delegated_authors_match(event))
&& self.tag_match(event)
&& !self.force_no_match
}
}
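Putting the predicates together, an illustrative fragment in the style of the tests below (assumes the surrounding Subscription type; JSON shape per NIP-01):

// matches events authored by (or delegated to) "abc", created
// strictly after 100, and carrying an "e" tag valued "def0"
let sub: Subscription =
    serde_json::from_str(r#"["REQ","n1",{"authors":["abc"],"since":100,"#e":["def0"]}]"#)?;
// "#e" lands in `tags` as ('e' -> {"def0"}); a multi-char key like
// "#foo" would instead set `force_no_match`, so nothing matches.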
@@ -274,6 +322,7 @@ mod tests {
let e = Event {
id: "foo".to_owned(),
pubkey: "abcd".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: Vec::new(),
@@ -292,6 +341,7 @@ mod tests {
let e = Event {
id: "abcd".to_owned(),
pubkey: "".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: Vec::new(),
@@ -310,6 +360,7 @@ mod tests {
let e = Event {
id: "abcde".to_owned(),
pubkey: "".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: Vec::new(),
@@ -321,6 +372,52 @@ mod tests {
Ok(())
}
#[test]
fn interest_until() -> Result<()> {
// subscription with a filter for ID and time
let s: Subscription =
serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "until": 1000}]"#)?;
let e = Event {
id: "abc".to_owned(),
pubkey: "".to_owned(),
delegated_by: None,
created_at: 50,
kind: 0,
tags: Vec::new(),
content: "".to_owned(),
sig: "".to_owned(),
tagidx: None,
};
assert!(s.interested_in_event(&e));
Ok(())
}
#[test]
fn interest_range() -> Result<()> {
// subscription with a filter for ID and time
let s_in: Subscription =
serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 100, "until": 200}]"#)?;
let s_before: Subscription =
serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 100, "until": 140}]"#)?;
let s_after: Subscription =
serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 160, "until": 200}]"#)?;
let e = Event {
id: "abc".to_owned(),
pubkey: "".to_owned(),
delegated_by: None,
created_at: 150,
kind: 0,
tags: Vec::new(),
content: "".to_owned(),
sig: "".to_owned(),
tagidx: None,
};
assert!(s_in.interested_in_event(&e));
assert!(!s_before.interested_in_event(&e));
assert!(!s_after.interested_in_event(&e));
Ok(())
}
#[test]
fn interest_time_and_id() -> Result<()> {
// subscription with a filter for ID and time
@@ -329,6 +426,7 @@ mod tests {
let e = Event {
id: "abc".to_owned(),
pubkey: "".to_owned(),
delegated_by: None,
created_at: 50,
kind: 0,
tags: Vec::new(),
@@ -347,6 +445,7 @@ mod tests {
let e = Event {
id: "abc".to_owned(),
pubkey: "".to_owned(),
delegated_by: None,
created_at: 1001,
kind: 0,
tags: Vec::new(),
@@ -365,6 +464,7 @@ mod tests {
let e = Event {
id: "abc".to_owned(),
pubkey: "".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: Vec::new(),
@@ -383,6 +483,7 @@ mod tests {
let e = Event {
id: "123".to_owned(),
pubkey: "abc".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: Vec::new(),
@@ -393,14 +494,15 @@ mod tests {
assert!(s.interested_in_event(&e));
Ok(())
}
#[test]
fn authors_multi_pubkey() -> Result<()> {
// check for any of a set of authors, against the pubkey
let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"authors":["abc", "bcd"]}]"#)?;
let e = Event {
id: "123".to_owned(),
pubkey: "bcd".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: Vec::new(),
@@ -419,6 +521,7 @@ mod tests {
let e = Event {
id: "123".to_owned(),
pubkey: "xyz".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: Vec::new(),


@@ -13,3 +13,21 @@ pub fn unix_time() -> u64 {
pub fn is_hex(s: &str) -> bool {
s.chars().all(|x| char::is_ascii_hexdigit(&x))
}
/// Check if a string contains only lower-case hex chars.
pub fn is_lower_hex(s: &str) -> bool {
s.chars().all(|x| {
(char::is_ascii_lowercase(&x) || char::is_ascii_digit(&x)) && char::is_ascii_hexdigit(&x)
})
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn lower_hex() {
let hexstr = "abcd0123";
assert!(is_lower_hex(hexstr));
}
}
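A hypothetical companion test for the negative case (uppercase input):

#[test]
fn upper_hex_is_not_lower() {
    assert!(is_hex("ABCD0123"));        // uppercase is still hex
    assert!(!is_lower_hex("ABCD0123")); // but not lower-case hex
}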

tests/common/mod.rs

@@ -0,0 +1,110 @@
use anyhow::{anyhow, Result};
use nostr_rs_relay::config;
use nostr_rs_relay::server::start_server;
//use http::{Request, Response};
use hyper::{Client, StatusCode, Uri};
use std::net::TcpListener;
use std::sync::atomic::{AtomicU16, Ordering};
use std::sync::mpsc as syncmpsc;
use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
use std::thread;
use std::thread::JoinHandle;
use std::time::Duration;
use tracing::{debug, info};
pub struct Relay {
pub port: u16,
pub handle: JoinHandle<()>,
pub shutdown_tx: MpscSender<()>,
}
pub fn start_relay() -> Result<Relay> {
// setup tracing
let _trace_sub = tracing_subscriber::fmt::try_init();
info!("Starting a new relay");
// replace default settings
let mut settings = config::Settings::default();
// identify open port
info!("Checking for address...");
let port = get_available_port().unwrap();
info!("Found open port: {}", port);
// bind to local interface only
settings.network.address = "127.0.0.1".to_owned();
settings.network.port = port;
// create an in-memory DB with multiple readers
settings.database.in_memory = true;
settings.database.min_conn = 4;
settings.database.max_conn = 8;
let (shutdown_tx, shutdown_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
let handle = thread::spawn(|| {
// server will block the thread it is run on.
let _ = start_server(settings, shutdown_rx);
});
// how do we know the relay has finished starting up?
Ok(Relay {
port,
handle,
shutdown_tx,
})
}
// check if the server is healthy via HTTP request
async fn server_ready(relay: &Relay) -> Result<bool> {
let uri: String = format!("http://127.0.0.1:{}/", relay.port);
let client = Client::new();
let uri: Uri = uri.parse().unwrap();
let res = client.get(uri).await?;
Ok(res.status() == StatusCode::OK)
}
pub async fn wait_for_healthy_relay(relay: &Relay) -> Result<()> {
// TODO: maximum time to wait for server to become healthy.
// give it a little time to start up before we start polling
tokio::time::sleep(Duration::from_millis(10)).await;
loop {
let server_check = server_ready(relay).await;
match server_check {
Ok(true) => {
// server responded with 200-OK.
break;
}
Ok(false) => {
// server responded with an error, we're done.
return Err(anyhow!("Got non-200-OK from relay"));
}
Err(_) => {
// server is not yet ready, probably connection refused...
debug!("Relay not ready, will try again...");
tokio::time::sleep(Duration::from_millis(10)).await;
}
}
}
info!("relay is ready");
Ok(())
// simple message sent to web browsers
//let mut request = Request::builder()
// .uri("https://www.rust-lang.org/")
// .header("User-Agent", "my-awesome-agent/1.0");
}
// from https://elliotekj.com/posts/2017/07/25/find-available-tcp-port-rust/
// This needed some modification; if multiple tasks all ask for open
// ports, they tend to get the same one. Instead, we probe ports
// incrementally from a global counter.
static PORT_COUNTER: AtomicU16 = AtomicU16::new(4030);
fn get_available_port() -> Option<u16> {
let startsearch = PORT_COUNTER.fetch_add(10, Ordering::SeqCst);
if startsearch >= 20000 {
// wrap around
PORT_COUNTER.store(4030, Ordering::Relaxed);
}
(startsearch..20000).find(|port| port_is_available(*port))
}
pub fn port_is_available(port: u16) -> bool {
info!("checking on port {}", port);
match TcpListener::bind(("127.0.0.1", port)) {
Ok(_) => true,
Err(_) => false,
}
}
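A throwaway usage sketch (hypothetical main; same helpers as above):

fn main() {
    // ports are probed upward from a shared counter, so concurrent test
    // binaries are unlikely to race for the same port
    if let Some(p) = get_available_port() {
        assert!(port_is_available(p)); // the probe's listener was dropped
        println!("a relay could bind 127.0.0.1:{}", p);
    }
}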

tests/integration_test.rs

@@ -0,0 +1,47 @@
use anyhow::Result;
use std::thread;
use std::time::Duration;
mod common;
#[tokio::test]
async fn start_and_stop() -> Result<()> {
// this will be the common pattern for acquiring a new relay:
// start a fresh relay on a port that is handed back to us:
let relay = common::start_relay()?;
// wait for the relay's webserver to start up and deliver a page:
common::wait_for_healthy_relay(&relay).await?;
let port = relay.port;
// just make sure we can start up and shut down.
// if we send a shutdown message before the server is listening,
// we will get a SendError. Keep sending until someone is listening.
loop {
let shutdown_res = relay.shutdown_tx.send(());
match shutdown_res {
Ok(()) => {
break;
}
Err(_) => {
thread::sleep(Duration::from_millis(100));
}
}
}
// wait for relay to shutdown
let thread_join = relay.handle.join();
assert!(thread_join.is_ok());
// assert that port is now available.
assert!(common::port_is_available(port));
Ok(())
}
#[tokio::test]
async fn relay_home_page() -> Result<()> {
// get a relay and wait for startup...
let relay = common::start_relay()?;
common::wait_for_healthy_relay(&relay).await?;
// tell relay to shutdown
let _res = relay.shutdown_tx.send(());
Ok(())
}