mirror of https://github.com/scsibug/nostr-rs-relay.git synced 2025-08-04 01:58:28 -04:00

Compare commits


482 Commits

Author SHA1 Message Date
Greg Heartsfield
d72af96d5f refactor: rustfmt 2025-02-23 11:23:22 -06:00
Greg Heartsfield
b4234eae25 refactor: clippy suggestions 2025-02-23 11:22:12 -06:00
Greg Heartsfield
d73cde2844 fix: only send events to the nip05 verifier if grpc permits
Prior to this, the NIP-05 verifier would receive metadata events for
gRPC-denied events. If the verifier passed, it would try to write
the verification record referencing a non-existent event, which
would fail with a SQL constraint error.
2025-02-23 11:04:35 -06:00
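The fix above is an ordering change; a minimal sketch of the idea, assuming illustrative names (the relay's actual `Event` type, gRPC authorizer, and channel wiring differ):

```rust
use tokio::sync::mpsc;

// Illustrative event type; the relay's real Event has more fields.
#[derive(Clone)]
struct Event {
    kind: u64,
}

async fn handle_event(event: Event, grpc_permitted: bool, metadata_tx: &mpsc::Sender<Event>) {
    // Before the fix, kind-0 (metadata) events reached the verifier even
    // when gRPC denied the write, so a passing verification would try to
    // record a reference to an event that was never stored, violating a
    // SQL constraint.
    if grpc_permitted {
        if event.kind == 0 {
            let _ = metadata_tx.send(event.clone()).await;
        }
        // ... persist the event ...
    }
}
```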
Greg Heartsfield
afbd7559e8 improvement: update dependencies
Updating addr2line v0.22.0 -> v0.24.2
Removing adler v1.0.2
Updating allocator-api2 v0.2.18 -> v0.2.21
Updating anstream v0.6.15 -> v0.6.18
Updating anstyle v1.0.8 -> v1.0.10
Updating anstyle-parse v0.2.5 -> v0.2.6
Updating anstyle-query v1.1.1 -> v1.1.2
Updating anstyle-wincon v3.0.4 -> v3.0.7
Updating anyhow v1.0.86 -> v1.0.96
Updating async-executor v1.13.0 -> v1.13.1
Removing async-io v1.13.0
Removing async-io v2.3.4
  Adding async-io v2.4.0
Removing async-lock v2.8.0
Updating async-std v1.12.0 -> v1.13.0
Updating async-stream v0.3.5 -> v0.3.6
Updating async-stream-impl v0.3.5 -> v0.3.6
Updating async-trait v0.1.82 -> v0.1.86
Updating autocfg v1.3.0 -> v1.4.0
Updating backtrace v0.3.73 -> v0.3.74
Updating bitflags v2.6.0 -> v2.8.0
Updating bumpalo v3.16.0 -> v3.17.0
Updating bytes v1.7.1 -> v1.10.0
Updating cc v1.1.16 -> v1.2.15
Updating chrono v0.4.38 -> v0.4.39
Updating clap v4.5.17 -> v4.5.30
Updating clap_builder v4.5.17 -> v4.5.30
Updating clap_derive v4.5.13 -> v4.5.28
Updating clap_lex v0.7.2 -> v0.7.4
Updating colorchoice v1.0.2 -> v1.0.3
Updating console v0.15.8 -> v0.15.10
Updating const_format v0.2.33 -> v0.2.34
Updating const_format_proc_macros v0.2.33 -> v0.2.34
Updating cpufeatures v0.2.14 -> v0.2.17
Updating crossbeam-channel v0.5.13 -> v0.5.14
Updating crossbeam-queue v0.3.11 -> v0.3.12
Updating crossbeam-utils v0.8.20 -> v0.8.21
  Adding displaydoc v0.2.5
Updating encode_unicode v0.3.6 -> v1.0.0
Updating equivalent v1.0.1 -> v1.0.2
Updating errno v0.3.9 -> v0.3.10
Updating event-listener v5.3.1 -> v5.4.0
Updating event-listener-strategy v0.5.2 -> v0.5.3
Removing fastrand v1.9.0
Removing fastrand v2.1.1
  Adding fastrand v2.3.0
Updating flate2 v1.0.33 -> v1.0.35
Updating futures v0.3.30 -> v0.3.31
Updating futures-channel v0.3.30 -> v0.3.31
Updating futures-core v0.3.30 -> v0.3.31
Updating futures-executor v0.3.30 -> v0.3.31
Updating futures-io v0.3.30 -> v0.3.31
Removing futures-lite v1.13.0
Removing futures-lite v2.3.0
  Adding futures-lite v2.6.0
Updating futures-macro v0.3.30 -> v0.3.31
Updating futures-sink v0.3.30 -> v0.3.31
Updating futures-task v0.3.30 -> v0.3.31
Updating futures-util v0.3.30 -> v0.3.31
  Adding getrandom v0.3.1
Updating gimli v0.29.0 -> v0.31.1
Updating gloo-timers v0.2.6 -> v0.3.0
  Adding hashbrown v0.15.2
Removing hermit-abi v0.3.9
Updating home v0.5.9 -> v0.5.11
Updating httparse v1.9.4 -> v1.10.0
Updating hyper v0.14.30 -> v0.14.32 (available: v1.6.0)
Updating iana-time-zone v0.1.60 -> v0.1.61
  Adding icu_collections v1.5.0
  Adding icu_locid v1.5.0
  Adding icu_locid_transform v1.5.0
  Adding icu_locid_transform_data v1.5.0
  Adding icu_normalizer v1.5.0
  Adding icu_normalizer_data v1.5.0
  Adding icu_properties v1.5.1
  Adding icu_properties_data v1.5.0
  Adding icu_provider v1.5.0
  Adding icu_provider_macros v1.5.0
Updating idna v0.5.0 -> v1.0.3
  Adding idna_adapter v1.2.0
Updating indexmap v2.5.0 -> v2.7.1
Updating indicatif v0.17.8 -> v0.17.11
Updating inout v0.1.3 -> v0.1.4
Removing io-lifetimes v1.0.11
Updating itoa v1.0.11 -> v1.0.14
Updating js-sys v0.3.70 -> v0.3.77
Updating libc v0.2.158 -> v0.2.170
Removing linux-raw-sys v0.3.8
Removing linux-raw-sys v0.4.14
  Adding linux-raw-sys v0.4.15
  Adding litemap v0.7.4
Updating log v0.4.22 -> v0.4.26
Removing miniz_oxide v0.7.4
Removing miniz_oxide v0.8.0
  Adding miniz_oxide v0.8.5
Updating mio v1.0.2 -> v1.0.3
Updating object v0.36.4 -> v0.36.7
Updating once_cell v1.19.0 -> v1.20.3
Updating openssl-probe v0.1.5 -> v0.1.6
Updating parking v2.2.0 -> v2.2.1
Updating pathdiff v0.2.1 -> v0.2.3
Updating pest v2.7.12 -> v2.7.15
Updating pest_derive v2.7.12 -> v2.7.15
Updating pest_generator v2.7.12 -> v2.7.15
Updating pest_meta v2.7.12 -> v2.7.15
Updating pin-project v1.1.5 -> v1.1.9
Updating pin-project-internal v1.1.5 -> v1.1.9
Updating pin-project-lite v0.2.14 -> v0.2.16
Updating pkg-config v0.3.30 -> v0.3.31
Removing polling v2.8.0
Removing polling v3.7.3
  Adding polling v3.7.4
Updating portable-atomic v1.7.0 -> v1.10.0
Updating proc-macro2 v1.0.86 -> v1.0.93
Updating quote v1.0.37 -> v1.0.38
Updating redox_syscall v0.5.3 -> v0.5.9
Updating regex v1.10.6 -> v1.11.1
Updating regex-automata v0.4.7 -> v0.4.9
Updating regex-syntax v0.8.4 -> v0.8.5
Updating ring v0.17.8 -> v0.17.11
Removing rustix v0.37.27
Removing rustix v0.38.36
  Adding rustix v0.38.44
Updating rustversion v1.0.17 -> v1.0.19
Updating ryu v1.0.18 -> v1.0.19
Updating schannel v0.1.23 -> v0.1.27
Updating security-framework-sys v2.11.1 -> v2.14.0
Updating serde v1.0.209 -> v1.0.218
Updating serde_derive v1.0.209 -> v1.0.218
Updating serde_json v1.0.128 -> v1.0.139
Updating smallvec v1.13.2 -> v1.14.0
Removing socket2 v0.4.10
Removing socket2 v0.5.7
  Adding socket2 v0.5.8
Removing spin v0.9.8
  Adding stable_deref_trait v1.2.0
Updating syn v2.0.77 -> v2.0.98
  Adding synstructure v0.13.1
Updating tempfile v3.12.0 -> v3.17.1
Removing thiserror v1.0.63
  Adding thiserror v1.0.69 (available: v2.0.11)
  Adding thiserror v2.0.11
Removing thiserror-impl v1.0.63
  Adding thiserror-impl v1.0.69
  Adding thiserror-impl v2.0.11
Updating time v0.3.36 -> v0.3.37
Updating time-macros v0.2.18 -> v0.2.19
  Adding tinystr v0.7.6
Updating tinyvec v1.8.0 -> v1.8.1
Updating tokio v1.40.0 -> v1.43.0
Updating tokio-macros v2.4.0 -> v2.5.0
Updating tokio-stream v0.1.16 -> v0.1.17
Updating tokio-util v0.7.12 -> v0.7.13
Updating tracing v0.1.40 -> v0.1.41
Updating tracing-attributes v0.1.27 -> v0.1.28
Updating tracing-core v0.1.32 -> v0.1.33
Updating tracing-subscriber v0.3.18 -> v0.3.19
Updating typenum v1.17.0 -> v1.18.0
Updating ucd-trie v0.1.6 -> v0.1.7
Updating unicode-bidi v0.3.15 -> v0.3.18
Updating unicode-ident v1.0.12 -> v1.0.17
Updating unicode-normalization v0.1.23 -> v0.1.24
Updating unicode-properties v0.1.2 -> v0.1.3
Updating unicode-segmentation v1.11.0 -> v1.12.0
Updating unicode-width v0.1.13 -> v0.2.0
Updating unicode-xid v0.2.5 -> v0.2.6
Updating url v2.5.2 -> v2.5.4
  Adding utf16_iter v1.0.5
  Adding utf8_iter v1.0.4
Updating uuid v1.10.0 -> v1.14.0
Updating valuable v0.1.0 -> v0.1.1
Updating value-bag v1.9.0 -> v1.10.0
Removing waker-fn v1.2.0
  Adding wasi v0.13.3+wasi-0.2.2
Updating wasm-bindgen v0.2.93 -> v0.2.100
Updating wasm-bindgen-backend v0.2.93 -> v0.2.100
Updating wasm-bindgen-futures v0.4.43 -> v0.4.50
Updating wasm-bindgen-macro v0.2.93 -> v0.2.100
Updating wasm-bindgen-macro-support v0.2.93 -> v0.2.100
Updating wasm-bindgen-shared v0.2.93 -> v0.2.100
Updating web-sys v0.3.70 -> v0.3.77
  Adding web-time v1.1.0
Removing windows-sys v0.48.0
Removing windows-targets v0.48.5
Removing windows_aarch64_gnullvm v0.48.5
Removing windows_aarch64_msvc v0.48.5
Removing windows_i686_gnu v0.48.5
Removing windows_i686_msvc v0.48.5
Removing windows_x86_64_gnu v0.48.5
Removing windows_x86_64_gnullvm v0.48.5
Removing windows_x86_64_msvc v0.48.5
  Adding wit-bindgen-rt v0.33.0
  Adding write16 v1.0.0
  Adding writeable v0.5.5
  Adding yoke v0.7.5
  Adding yoke-derive v0.7.5
  Adding zerofrom v0.1.5
  Adding zerofrom-derive v0.1.5
  Adding zerovec v0.10.4
  Adding zerovec-derive v0.10.3
2025-02-23 11:01:27 -06:00
laanwj
a6b48620fd fix: sqlite schema comment
`event_hash` is the raw SHA256 hash of the event, not a 4-byte hash.
2024-10-19 07:12:11 -05:00
Greg Heartsfield
d71f5cb029 docs: remove reference to defunct anigma channel 2024-10-04 16:54:10 -05:00
Greg Heartsfield
1ed8cc08cc docs: point github CI badge to main repo 2024-10-04 08:34:42 -05:00
Greg Heartsfield
ff65ec2acd build: bump version to 0.9.0 2024-09-06 11:10:41 -05:00
Greg Heartsfield
4461648c64 improvement: update dependencies
Updating addr2line v0.21.0 -> v0.22.0
Adding adler2 v2.0.0
Updating allocator-api2 v0.2.16 -> v0.2.18
Updating anstream v0.6.13 -> v0.6.15
Updating anstyle v1.0.6 -> v1.0.8
Updating anstyle-parse v0.2.3 -> v0.2.5
Updating anstyle-query v1.0.2 -> v1.1.1
Updating anstyle-wincon v3.0.2 -> v3.0.4
Updating anyhow v1.0.81 -> v1.0.86
Updating async-channel v2.2.0 -> v2.3.1
Updating async-executor v1.8.0 -> v1.13.0
Updating async-io v2.3.2 -> v2.3.4
Updating async-lock v3.3.0 -> v3.4.0
Updating async-task v4.7.0 -> v4.7.1
Updating async-trait v0.1.79 -> v0.1.82
Updating autocfg v1.2.0 -> v1.3.0
Updating backtrace v0.3.71 -> v0.3.73
Updating bitflags v2.5.0 -> v2.6.0
Updating blocking v1.5.1 -> v1.6.1
Updating bumpalo v3.15.4 -> v3.16.0
Updating bytes v1.6.0 -> v1.7.1
Updating cc v1.0.90 -> v1.1.16
Updating chrono v0.4.37 -> v0.4.38
Updating clap v4.5.4 -> v4.5.17
Updating clap_builder v4.5.2 -> v4.5.17
Updating clap_derive v4.5.4 -> v4.5.13
Updating clap_lex v0.7.0 -> v0.7.2
Updating colorchoice v1.0.0 -> v1.0.2
Updating concurrent-queue v2.4.0 -> v2.5.0
Updating const_format v0.2.32 -> v0.2.33
Updating const_format_proc_macros v0.2.32 -> v0.2.33
Updating core-foundation-sys v0.8.6 -> v0.8.7
Updating cpufeatures v0.2.12 -> v0.2.14
Updating crc v3.0.1 -> v3.2.1
Updating crc32fast v1.4.0 -> v1.4.2
Updating crossbeam-channel v0.5.12 -> v0.5.13
Updating crossbeam-utils v0.8.19 -> v0.8.20
Updating either v1.10.0 -> v1.13.0
Updating errno v0.3.8 -> v0.3.9
Removing event-listener v4.0.3
Removing event-listener v5.2.0
Adding event-listener v5.3.1
Removing event-listener-strategy v0.4.0
Removing event-listener-strategy v0.5.0
Adding event-listener-strategy v0.5.2
Updating fastrand v2.0.2 -> v2.1.1
Removing finl_unicode v1.2.0
Updating flate2 v1.0.28 -> v1.0.33
Updating getrandom v0.2.12 -> v0.2.15
Updating gimli v0.28.1 -> v0.29.0
Updating h2 v0.3.25 -> v0.3.26
Updating hashbrown v0.14.3 -> v0.14.5
Adding hermit-abi v0.4.0
Updating httparse v1.8.0 -> v1.9.4
Updating hyper v0.14.28 -> v0.14.30
Updating indexmap v2.2.6 -> v2.5.0
Updating instant v0.1.12 -> v0.1.13
Adding is_terminal_polyfill v1.70.1
Removing itertools v0.12.1
Updating js-sys v0.3.69 -> v0.3.70
Updating lazy_static v1.4.0 -> v1.5.0
Updating libc v0.2.153 -> v0.2.158
Updating libredox v0.0.1 -> v0.1.3
Updating linux-raw-sys v0.4.13 -> v0.4.14
Updating lock_api v0.4.11 -> v0.4.12
Updating log v0.4.21 -> v0.4.22
Updating memchr v2.7.2 -> v2.7.4
Removing miniz_oxide v0.7.2
Adding miniz_oxide v0.7.4
Adding miniz_oxide v0.8.0
Updating mio v0.8.11 -> v1.0.2
Updating num-iter v0.1.44 -> v0.1.45
Updating num-traits v0.2.18 -> v0.2.19
Removing num_cpus v1.16.0
Updating object v0.32.2 -> v0.36.4
Updating parking_lot v0.12.1 -> v0.12.3
Updating parking_lot_core v0.9.9 -> v0.9.10
Updating paste v1.0.14 -> v1.0.15
Updating pest v2.7.8 -> v2.7.12
Updating pest_derive v2.7.8 -> v2.7.12
Updating pest_generator v2.7.8 -> v2.7.12
Updating pest_meta v2.7.8 -> v2.7.12
Updating petgraph v0.6.4 -> v0.6.5
Updating pin-project-lite v0.2.13 -> v0.2.14
Updating piper v0.2.1 -> v0.2.4
Updating polling v3.6.0 -> v3.7.3
Updating portable-atomic v1.6.0 -> v1.7.0
Updating ppv-lite86 v0.2.17 -> v0.2.20
Updating proc-macro2 v1.0.79 -> v1.0.86
Updating prometheus v0.13.3 -> v0.13.4
Updating quote v1.0.35 -> v1.0.37
Updating redox_syscall v0.4.1 -> v0.5.3
Updating redox_users v0.4.4 -> v0.4.6
Updating regex v1.10.4 -> v1.10.6
Updating regex-automata v0.4.6 -> v0.4.7
Updating regex-syntax v0.8.3 -> v0.8.4
Updating rustc-demangle v0.1.23 -> v0.1.24
Updating rustix v0.38.32 -> v0.38.36
Updating rustls v0.21.10 -> v0.21.12
Updating rustversion v1.0.14 -> v1.0.17
Updating ryu v1.0.17 -> v1.0.18
Updating security-framework v2.9.2 -> v2.11.1
Updating security-framework-sys v2.9.1 -> v2.11.1
Updating serde v1.0.197 -> v1.0.209
Updating serde_derive v1.0.197 -> v1.0.209
Updating serde_json v1.0.115 -> v1.0.128
Adding shlex v1.3.0
Updating signal-hook-registry v1.4.1 -> v1.4.2
Updating socket2 v0.5.6 -> v0.5.7
Updating sqlformat v0.2.3 -> v0.2.6
Updating stringprep v0.1.4 -> v0.1.5
Updating strsim v0.11.0 -> v0.11.1
Updating subtle v2.5.0 -> v2.6.1
Updating syn v2.0.55 -> v2.0.77
Updating tempfile v3.10.1 -> v3.12.0
Updating thiserror v1.0.58 -> v1.0.63
Updating thiserror-impl v1.0.58 -> v1.0.63
Updating tinyvec v1.6.0 -> v1.8.0
Updating tokio v1.36.0 -> v1.40.0
Updating tokio-macros v2.2.0 -> v2.4.0
Updating tokio-stream v0.1.15 -> v0.1.16
Updating tokio-util v0.7.10 -> v0.7.12
Updating tower-layer v0.3.2 -> v0.3.3
Updating tower-service v0.3.2 -> v0.3.3
Adding unicode-properties v0.1.2
Updating unicode-width v0.1.11 -> v0.1.13
Updating unicode-xid v0.2.4 -> v0.2.5
Updating url v2.5.0 -> v2.5.2
Updating utf8parse v0.2.1 -> v0.2.2
Updating uuid v1.8.0 -> v1.10.0
Updating value-bag v1.8.1 -> v1.9.0
Updating version_check v0.9.4 -> v0.9.5
Updating waker-fn v1.1.1 -> v1.2.0
Updating wasm-bindgen v0.2.92 -> v0.2.93
Updating wasm-bindgen-backend v0.2.92 -> v0.2.93
Updating wasm-bindgen-futures v0.4.42 -> v0.4.43
Updating wasm-bindgen-macro v0.2.92 -> v0.2.93
Updating wasm-bindgen-macro-support v0.2.92 -> v0.2.93
Updating wasm-bindgen-shared v0.2.92 -> v0.2.93
Updating web-sys v0.3.69 -> v0.3.70
Updating whoami v1.5.1 -> v1.5.2
Adding windows-sys v0.59.0
Updating windows-targets v0.52.4 -> v0.52.6
Updating windows_aarch64_gnullvm v0.52.4 -> v0.52.6
Updating windows_aarch64_msvc v0.52.4 -> v0.52.6
Updating windows_i686_gnu v0.52.4 -> v0.52.6
Adding windows_i686_gnullvm v0.52.6
Updating windows_i686_msvc v0.52.4 -> v0.52.6
Updating windows_x86_64_gnu v0.52.4 -> v0.52.6
Updating windows_x86_64_gnullvm v0.52.4 -> v0.52.6
Updating windows_x86_64_msvc v0.52.4 -> v0.52.6
Updating zerocopy v0.7.32 -> v0.7.35
Updating zerocopy-derive v0.7.32 -> v0.7.35
2024-09-06 11:05:42 -05:00
kuba
6329acd82b feat: add custom relay page 2024-09-06 10:44:15 -05:00
zappityzap
05411eb9e3 refactor: separate conditional dependencies
- move cln-rpc to regular dependencies
- add whitespace to separate conditional dependencies

fixes build on MSVC and OpenBSD
2024-09-06 10:33:56 -05:00
Joseph Goulden
5a21890625 feat: add cln payment processor 2024-08-14 11:36:07 -05:00
Joseph Goulden
0d04b5eefa add nix flake 2024-08-14 11:34:41 -05:00
Daniel Emery
07198b2cb9 refactor: add explicit logging when NIP-05 verification requests fail 2024-08-14 11:27:58 -05:00
Daniel Cadenas
af6d101c21 fix: id based sqlite query
Queries using the `id` field were failing after the refactor of
removing hexranges.
2024-08-14 11:21:44 -05:00
jesterhodl
5ad318e6e8 fix: time compilation 2024-08-14 10:48:07 -05:00
zappityzap
914ec77617 fix: openbsd build 2024-08-14 10:45:57 -05:00
Greg Heartsfield
4f518fd0e7 fix: prevent thread panic on large tag values using postgres
Exceptionally large tag values (thousands of characters) can result in
an error from postgres: index row size exceeds btree version 4 maximum
2704 for index "tag_value_idx".  This panics the writer thread, and
prevents further writes from succeeding.

This change simply removes the unwrap, allowing the error to propagate
where it is sent as a write error back to the client.  The error
message could be improved.

https://github.com/scsibug/nostr-rs-relay/issues/196
2024-06-06 17:32:13 -05:00
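A hedged sketch of the shape of that fix, using sqlx against Postgres (the function and query are illustrative, not the relay's actual writer code):

```rust
use sqlx::PgPool;

// Hypothetical helper; the point is the `?` where `.unwrap()` used to be.
async fn write_tag(
    pool: &PgPool,
    event_id: i64,
    name: &str,
    value: &str,
) -> Result<(), sqlx::Error> {
    sqlx::query("INSERT INTO tag (event_id, name, value) VALUES ($1, $2, $3)")
        .bind(event_id)
        .bind(name)
        .bind(value)
        .execute(pool)
        .await?; // an index-row-size error now propagates to the caller as
                 // a write error instead of panicking the writer thread
    Ok(())
}
```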
Laszlo Megyer
b04ab76e73 fix: postgresql tag filtering for odd-length hex-looking values
The tag filtering code misses odd-length strings that contain only hex digits [0-9a-f].
This fix makes the condition for `has_plain_values` the inverse of the condition for `has_hex_values`.

Fixes 
2024-04-03 02:51:12 +00:00
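A minimal sketch of the corrected predicate, assuming a hex value is defined as even-length lowercase hex (helper names are hypothetical):

```rust
/// A tag value counts as hex only if it is non-empty, even-length, and
/// entirely lowercase hex digits (matching how ids/pubkeys are stored).
fn is_hex_value(v: &str) -> bool {
    !v.is_empty()
        && v.len() % 2 == 0
        && v.bytes().all(|b| matches!(b, b'0'..=b'9' | b'a'..=b'f'))
}

/// The fix: "plain" is the exact complement of "hex". Previously an
/// odd-length hex-looking value such as "abc" satisfied neither
/// condition and silently fell out of the generated SQL filter.
fn partition_tag_values<'a>(values: &[&'a str]) -> (Vec<&'a str>, Vec<&'a str>) {
    values.iter().copied().partition(|v| is_hex_value(v))
}
```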
Greg Heartsfield
39a3a258a0 refactor: clippy suggestions 2024-03-28 10:15:00 -05:00
Greg Heartsfield
44c6e3d88b improvement: upgrade dependencies
Updating aes v0.8.3 -> v0.8.4
Removing ahash v0.7.7
Removing ahash v0.8.6
Adding ahash v0.7.8
Adding ahash v0.8.11
Updating aho-corasick v1.1.2 -> v1.1.3
Updating anstream v0.6.4 -> v0.6.13
Updating anstyle v1.0.4 -> v1.0.6
Updating anstyle-parse v0.2.2 -> v0.2.3
Updating anstyle-query v1.0.0 -> v1.0.2
Updating anstyle-wincon v3.0.1 -> v3.0.2
Updating anyhow v1.0.75 -> v1.0.81
Updating async-channel v2.1.1 -> v2.2.0
Updating async-global-executor v2.4.0 -> v2.4.1
Updating async-io v2.2.1 -> v2.3.2
Updating async-lock v3.1.2 -> v3.3.0
Updating async-task v4.5.0 -> v4.7.0
Updating async-trait v0.1.74 -> v0.1.79
Updating autocfg v1.1.0 -> v1.2.0
Updating backtrace v0.3.69 -> v0.3.71
Updating base64 v0.21.5 -> v0.21.7
Updating bitflags v2.4.1 -> v2.5.0
Updating bumpalo v3.14.0 -> v3.15.4
Updating bytes v1.5.0 -> v1.6.0
Updating cc v1.0.83 -> v1.0.90
Updating chrono v0.4.31 -> v0.4.37
Updating clap v4.4.10 -> v4.5.4
Updating clap_builder v4.4.9 -> v4.5.2
Updating clap_derive v4.4.7 -> v4.5.4
Updating clap_lex v0.6.0 -> v0.7.0
Updating concurrent-queue v2.3.0 -> v2.4.0
Updating console v0.15.7 -> v0.15.8
Updating cpufeatures v0.2.11 -> v0.2.12
Updating crc32fast v1.3.2 -> v1.4.0
Updating crossbeam-channel v0.5.8 -> v0.5.12
Updating crossbeam-queue v0.3.8 -> v0.3.11
Updating crossbeam-utils v0.8.16 -> v0.8.19
Updating deranged v0.3.9 -> v0.3.11
Updating either v1.9.0 -> v1.10.0
Removing event-listener v4.0.0
Adding event-listener v4.0.3
Adding event-listener v5.2.0
Adding event-listener-strategy v0.5.0
Updating fastrand v2.0.1 -> v2.0.2
Updating futures v0.3.29 -> v0.3.30
Updating futures-channel v0.3.29 -> v0.3.30
Updating futures-core v0.3.29 -> v0.3.30
Updating futures-executor v0.3.29 -> v0.3.30
Updating futures-io v0.3.29 -> v0.3.30
Updating futures-lite v2.0.1 -> v2.3.0
Updating futures-macro v0.3.29 -> v0.3.30
Updating futures-sink v0.3.29 -> v0.3.30
Updating futures-task v0.3.29 -> v0.3.30
Updating futures-timer v3.0.2 -> v3.0.3
Updating futures-util v0.3.29 -> v0.3.30
Updating getrandom v0.2.11 -> v0.2.12
Updating h2 v0.3.22 -> v0.3.25
Adding heck v0.5.0
Updating hermit-abi v0.3.3 -> v0.3.9
Updating hkdf v0.12.3 -> v0.12.4
Updating home v0.5.5 -> v0.5.9
Updating http v0.2.11 -> v0.2.12
Updating http-body v0.4.5 -> v0.4.6
Updating hyper v0.14.27 -> v0.14.28
Updating iana-time-zone v0.1.58 -> v0.1.60
Updating indexmap v2.1.0 -> v2.2.6
Updating indicatif v0.17.7 -> v0.17.8
Updating itertools v0.11.0 -> v0.12.1
Updating itoa v1.0.9 -> v1.0.11
Updating js-sys v0.3.66 -> v0.3.69
Updating libc v0.2.150 -> v0.2.153
Updating linux-raw-sys v0.4.12 -> v0.4.13
Updating log v0.4.20 -> v0.4.21
Updating memchr v2.6.4 -> v2.7.2
Updating miniz_oxide v0.7.1 -> v0.7.2
Updating mio v0.8.9 -> v0.8.11
Adding num-conv v0.1.0
Updating num-integer v0.1.45 -> v0.1.46
Updating num-iter v0.1.43 -> v0.1.44
Updating num-traits v0.2.17 -> v0.2.18
Updating object v0.32.1 -> v0.32.2
Updating once_cell v1.18.0 -> v1.19.0
Updating pest v2.7.5 -> v2.7.8
Updating pest_derive v2.7.5 -> v2.7.8
Updating pest_generator v2.7.5 -> v2.7.8
Updating pest_meta v2.7.5 -> v2.7.8
Updating pin-project v1.1.3 -> v1.1.5
Updating pin-project-internal v1.1.3 -> v1.1.5
Updating pkg-config v0.3.27 -> v0.3.30
Updating polling v3.3.1 -> v3.6.0
Updating portable-atomic v1.5.1 -> v1.6.0
Updating proc-macro2 v1.0.70 -> v1.0.79
Updating quote v1.0.33 -> v1.0.35
Updating regex v1.10.2 -> v1.10.4
Updating regex-automata v0.4.3 -> v0.4.6
Updating regex-syntax v0.8.2 -> v0.8.3
Updating ring v0.17.6 -> v0.17.8
Updating rustix v0.38.26 -> v0.38.32
Updating rustls v0.21.9 -> v0.21.10
Updating ryu v1.0.15 -> v1.0.17
Updating schannel v0.1.22 -> v0.1.23
Updating serde v1.0.193 -> v1.0.197
Updating serde_derive v1.0.193 -> v1.0.197
Updating serde_json v1.0.108 -> v1.0.115
Updating smallvec v1.11.2 -> v1.13.2
Updating socket2 v0.5.5 -> v0.5.6
Updating sqlformat v0.2.2 -> v0.2.3
Updating strsim v0.10.0 -> v0.11.0
Updating syn v2.0.39 -> v2.0.55
Updating tempfile v3.8.1 -> v3.10.1
Updating thiserror v1.0.50 -> v1.0.58
Updating thiserror-impl v1.0.50 -> v1.0.58
Updating thread_local v1.1.7 -> v1.1.8
Updating time v0.3.30 -> v0.3.34
Updating time-macros v0.2.15 -> v0.2.17
Updating tokio v1.34.0 -> v1.36.0
Updating tokio-stream v0.1.14 -> v0.1.15
Updating try-lock v0.2.4 -> v0.2.5
Updating unicode-bidi v0.3.13 -> v0.3.15
Updating unicode-normalization v0.1.22 -> v0.1.23
Updating unicode-segmentation v1.10.1 -> v1.11.0
Updating uuid v1.6.1 -> v1.8.0
Updating value-bag v1.4.2 -> v1.8.1
Adding wasite v0.1.0
Updating wasm-bindgen v0.2.89 -> v0.2.92
Updating wasm-bindgen-backend v0.2.89 -> v0.2.92
Updating wasm-bindgen-futures v0.4.39 -> v0.4.42
Updating wasm-bindgen-macro v0.2.89 -> v0.2.92
Updating wasm-bindgen-macro-support v0.2.89 -> v0.2.92
Updating wasm-bindgen-shared v0.2.89 -> v0.2.92
Updating web-sys v0.3.66 -> v0.3.69
Updating whoami v1.4.1 -> v1.5.1
Updating windows-core v0.51.1 -> v0.52.0
Removing windows-sys v0.45.0
Removing windows-targets v0.42.2
Removing windows-targets v0.52.0
Adding windows-targets v0.52.4
Removing windows_aarch64_gnullvm v0.42.2
Removing windows_aarch64_gnullvm v0.52.0
Adding windows_aarch64_gnullvm v0.52.4
Removing windows_aarch64_msvc v0.42.2
Removing windows_aarch64_msvc v0.52.0
Adding windows_aarch64_msvc v0.52.4
Removing windows_i686_gnu v0.42.2
Removing windows_i686_gnu v0.52.0
Adding windows_i686_gnu v0.52.4
Removing windows_i686_msvc v0.42.2
Removing windows_i686_msvc v0.52.0
Adding windows_i686_msvc v0.52.4
Removing windows_x86_64_gnu v0.42.2
Removing windows_x86_64_gnu v0.52.0
Adding windows_x86_64_gnu v0.52.4
Removing windows_x86_64_gnullvm v0.42.2
Removing windows_x86_64_gnullvm v0.52.0
Adding windows_x86_64_gnullvm v0.52.4
Removing windows_x86_64_msvc v0.42.2
Removing windows_x86_64_msvc v0.52.0
Adding windows_x86_64_msvc v0.52.4
Updating zerocopy v0.7.28 -> v0.7.32
Updating zerocopy-derive v0.7.28 -> v0.7.32
2024-03-28 09:52:58 -05:00
Laszlo Megyer
767b76b2b3 fix: author filter in SQLite queries use correct blob type
https://todo.sr.ht/~gheartsfield/nostr-rs-relay/79
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2024-03-28 09:40:17 -05:00
Greg Heartsfield
c5fb16cd98 improvement: describe migration step that failed 2023-12-09 09:51:09 -06:00
Kieran
9c86f03902 refactor: drop hexrange
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-12-03 12:52:03 -06:00
Greg Heartsfield
971889f9a6 improvement: disable limit_scrapers by default
This is a good feature, but will limit valid requests from being
served.  Defaulting this to off will be less surprising to relay ops.
2023-12-03 10:51:59 -06:00
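A hedged sketch of what the default flip amounts to in a serde-backed config (the field name comes from the feature below; its placement in a `Limits` struct is an assumption):

```rust
use serde::Deserialize;

#[derive(Deserialize, Default)]
pub struct Limits {
    // bool::default() is false, so scraper limiting is opt-in: a relay
    // operator must explicitly enable it in config.toml.
    #[serde(default)]
    pub limit_scrapers: bool,
}
```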
Kieran
388eadf880 feat: limit_scrapers
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-12-03 10:51:49 -06:00
Greg Heartsfield
1ce029860c docs: ensure latest Rust base image is pulled 2023-12-02 09:27:27 -06:00
Greg Heartsfield
b7e10e26a2 improvement: upgrade dependencies
Adding async-channel v2.1.1
Updating async-executor v1.6.0 -> v1.8.0
Updating async-global-executor v2.3.1 -> v2.4.0
Adding async-io v2.2.1
Adding async-lock v3.1.2
Updating blocking v1.4.1 -> v1.5.1
Updating clap v4.4.7 -> v4.4.10
Updating clap_builder v4.4.7 -> v4.4.9
Updating core-foundation v0.9.3 -> v0.9.4
Updating core-foundation-sys v0.8.4 -> v0.8.6
Updating errno v0.3.5 -> v0.3.8
Adding event-listener v4.0.0
Adding event-listener-strategy v0.4.0
Updating form_urlencoded v1.2.0 -> v1.2.1
Adding futures-lite v2.0.1
Updating getrandom v0.2.10 -> v0.2.11
Updating gimli v0.28.0 -> v0.28.1
Updating h2 v0.3.21 -> v0.3.22
Updating hashbrown v0.14.2 -> v0.14.3
Updating hdrhistogram v7.5.2 -> v7.5.4
Updating http v0.2.9 -> v0.2.11
Updating idna v0.4.0 -> v0.5.0
Updating js-sys v0.3.65 -> v0.3.66
Updating linux-raw-sys v0.4.10 -> v0.4.12
Updating percent-encoding v2.3.0 -> v2.3.1
Adding polling v3.3.1
Updating proc-macro2 v1.0.69 -> v1.0.70
Updating ring v0.17.5 -> v0.17.6
Updating rustix v0.38.21 -> v0.38.26
Updating rustls v0.21.8 -> v0.21.9
Updating rustls-pemfile v1.0.3 -> v1.0.4
Updating serde v1.0.190 -> v1.0.193
Updating serde_derive v1.0.190 -> v1.0.193
Updating smallvec v1.11.1 -> v1.11.2
Updating syn v2.0.38 -> v2.0.39
Updating tokio v1.33.0 -> v1.34.0
Updating tokio-macros v2.1.0 -> v2.2.0
Updating tracing-appender v0.2.2 -> v0.2.3
Updating tracing-log v0.1.4 -> v0.2.0
Updating tracing-subscriber v0.3.17 -> v0.3.18
Updating url v2.4.1 -> v2.5.0
Updating uuid v1.5.0 -> v1.6.1
Updating wasm-bindgen v0.2.88 -> v0.2.89
Updating wasm-bindgen-backend v0.2.88 -> v0.2.89
Updating wasm-bindgen-futures v0.4.38 -> v0.4.39
Updating wasm-bindgen-macro v0.2.88 -> v0.2.89
Updating wasm-bindgen-macro-support v0.2.88 -> v0.2.89
Updating wasm-bindgen-shared v0.2.88 -> v0.2.89
Updating web-sys v0.3.65 -> v0.3.66
Adding windows-sys v0.52.0
Adding windows-targets v0.52.0
Adding windows_aarch64_gnullvm v0.52.0
Adding windows_aarch64_msvc v0.52.0
Adding windows_i686_gnu v0.52.0
Adding windows_i686_msvc v0.52.0
Adding windows_x86_64_gnu v0.52.0
Adding windows_x86_64_gnullvm v0.52.0
Adding windows_x86_64_msvc v0.52.0
Updating zerocopy v0.7.25 -> v0.7.28
Updating zerocopy-derive v0.7.25 -> v0.7.28
2023-12-01 17:51:17 -06:00
Kieran
ab736f5f98 fix: abort query builder for empty arrays
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-12-01 17:22:14 -06:00
Kieran
b4471a6698 fix: multi-tag query
fixes https://github.com/scsibug/nostr-rs-relay/issues/102

Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-12-01 17:22:01 -06:00
Kieran
7120de4ff8 feat: restricted_writes
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-11-23 13:27:27 -06:00
thesimplekid
4ff77ab537 improvement: config to disable dm/invoice creation
Sending DMs to users who are not signed up but
have attempted to publish events is now disabled
by default. This stops the creation of extra
invoices for pubkeys that may have no intention
of signing up for the relay, and reduces the
number of DMs that are created.

Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-11-23 13:22:41 -06:00
Carsten Otto
84f60f0abc improvement(NIP-11): mention requirements for admin contact pubkey
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-11-23 13:14:45 -06:00
Greg Heartsfield
8a67770206 improvement: update dependencies
Removing ahash v0.4.7
Removing ahash v0.7.6
Removing ahash v0.8.3
Adding ahash v0.4.8
Adding ahash v0.7.7
Adding ahash v0.8.6
Updating aho-corasick v1.0.5 -> v1.1.2
Updating anstream v0.5.0 -> v0.6.4
Updating anstyle v1.0.2 -> v1.0.4
Updating anstyle-parse v0.2.1 -> v0.2.2
Updating anstyle-wincon v2.1.0 -> v3.0.1
Updating async-executor v1.5.1 -> v1.6.0
Updating async-task v4.4.0 -> v4.5.0
Updating async-trait v0.1.73 -> v0.1.74
Updating atomic-waker v1.1.1 -> v1.1.2
Updating base64 v0.21.3 -> v0.21.5
Updating bitflags v2.4.0 -> v2.4.1
Updating blocking v1.3.1 -> v1.4.1
Updating bumpalo v3.13.0 -> v3.14.0
Updating byteorder v1.4.3 -> v1.5.0
Updating bytes v1.4.0 -> v1.5.0
Updating chrono v0.4.28 -> v0.4.31
Updating clap v4.4.2 -> v4.4.7
Updating clap_builder v4.4.2 -> v4.4.7
Updating clap_derive v4.4.2 -> v4.4.7
Updating clap_lex v0.5.1 -> v0.6.0
Updating concurrent-queue v2.2.0 -> v2.3.0
Updating const_format v0.2.31 -> v0.2.32
Updating const_format_proc_macros v0.2.31 -> v0.2.32
Updating cpufeatures v0.2.9 -> v0.2.11
Updating crc-catalog v2.2.0 -> v2.4.0
Updating deranged v0.3.8 -> v0.3.9
Removing dirs v5.0.1
Removing dirs-sys v0.4.1
Updating errno v0.3.3 -> v0.3.5
Removing errno-dragonfly v0.1.2
Updating fastrand v2.0.0 -> v2.0.1
Adding finl_unicode v1.2.0
Updating flate2 v1.0.27 -> v1.0.28
Updating futures v0.3.28 -> v0.3.29
Updating futures-channel v0.3.28 -> v0.3.29
Updating futures-core v0.3.28 -> v0.3.29
Updating futures-executor v0.3.28 -> v0.3.29
Updating futures-io v0.3.28 -> v0.3.29
Updating futures-macro v0.3.28 -> v0.3.29
Updating futures-sink v0.3.28 -> v0.3.29
Updating futures-task v0.3.28 -> v0.3.29
Updating futures-util v0.3.28 -> v0.3.29
Updating hashbrown v0.14.0 -> v0.14.2
Updating hermit-abi v0.3.2 -> v0.3.3
Adding home v0.5.5
Updating hyper-rustls v0.24.1 -> v0.24.2
Updating iana-time-zone v0.1.57 -> v0.1.58
Updating indexmap v2.0.0 -> v2.1.0
Updating indicatif v0.17.6 -> v0.17.7
Updating js-sys v0.3.64 -> v0.3.65
Updating libc v0.2.147 -> v0.2.150
Adding libredox v0.0.1
Updating linux-raw-sys v0.4.5 -> v0.4.10
Updating lock_api v0.4.10 -> v0.4.11
Updating matchit v0.7.2 -> v0.7.3
Updating md-5 v0.10.5 -> v0.10.6
Updating memchr v2.6.3 -> v2.6.4
Updating mio v0.8.8 -> v0.8.9
Updating num-traits v0.2.16 -> v0.2.17
Removing option-ext v0.2.0
Updating parking v2.1.0 -> v2.2.0
Updating parking_lot_core v0.9.8 -> v0.9.9
Updating pest v2.7.3 -> v2.7.5
Updating pest_derive v2.7.3 -> v2.7.5
Updating pest_generator v2.7.3 -> v2.7.5
Updating pest_meta v2.7.3 -> v2.7.5
Adding piper v0.2.1
Updating portable-atomic v1.4.3 -> v1.5.1
Adding powerfmt v0.2.0
Updating proc-macro2 v1.0.66 -> v1.0.69
Updating redox_syscall v0.3.5 -> v0.4.1
Updating redox_users v0.4.3 -> v0.4.4
Updating regex v1.9.5 -> v1.10.2
Updating regex-automata v0.3.8 -> v0.4.3
Updating regex-syntax v0.7.5 -> v0.8.2
Adding ring v0.17.5
Removing rustix v0.37.23
Removing rustix v0.38.11
Adding rustix v0.37.27
Adding rustix v0.38.21
Updating rustls v0.21.7 -> v0.21.8
Updating rustls-webpki v0.101.4 -> v0.101.7
Updating sct v0.7.0 -> v0.7.1
Updating serde v1.0.188 -> v1.0.190
Updating serde_derive v1.0.188 -> v1.0.190
Updating serde_json v1.0.105 -> v1.0.108
Updating sha1 v0.10.5 -> v0.10.6
Updating sha2 v0.10.7 -> v0.10.8
Updating sharded-slab v0.1.4 -> v0.1.7
Updating smallvec v1.11.0 -> v1.11.1
Removing socket2 v0.4.9
Removing socket2 v0.5.3
Adding socket2 v0.4.10
Adding socket2 v0.5.5
Adding spin v0.9.8
Updating stringprep v0.1.3 -> v0.1.4
Updating syn v2.0.31 -> v2.0.38
Updating tempfile v3.8.0 -> v3.8.1
Updating thiserror v1.0.48 -> v1.0.50
Updating thiserror-impl v1.0.48 -> v1.0.50
Removing time v0.1.43
Removing time v0.3.28
Adding time v0.3.30
Updating time-core v0.1.1 -> v0.1.2
Updating time-macros v0.2.14 -> v0.2.15
Updating tokio v1.32.0 -> v1.33.0
Updating tokio-util v0.7.8 -> v0.7.10
Updating tracing v0.1.37 -> v0.1.40
Updating tracing-attributes v0.1.26 -> v0.1.27
Updating tracing-core v0.1.31 -> v0.1.32
Updating tracing-log v0.1.3 -> v0.1.4
Updating typenum v1.16.0 -> v1.17.0
Updating unicode-ident v1.0.11 -> v1.0.12
Updating unicode-width v0.1.10 -> v0.1.11
Adding untrusted v0.9.0
Updating uuid v1.4.1 -> v1.5.0
Updating value-bag v1.4.1 -> v1.4.2
Updating waker-fn v1.1.0 -> v1.1.1
Updating wasm-bindgen v0.2.87 -> v0.2.88
Updating wasm-bindgen-backend v0.2.87 -> v0.2.88
Updating wasm-bindgen-futures v0.4.37 -> v0.4.38
Updating wasm-bindgen-macro v0.2.87 -> v0.2.88
Updating wasm-bindgen-macro-support v0.2.87 -> v0.2.88
Updating wasm-bindgen-shared v0.2.87 -> v0.2.88
Updating web-sys v0.3.64 -> v0.3.65
Updating webpki v0.22.1 -> v0.22.4
Updating which v4.4.1 -> v4.4.2
Removing windows v0.48.0
Adding windows-core v0.51.1
Adding zerocopy v0.7.25
Adding zerocopy-derive v0.7.25
2023-11-05 09:54:07 -06:00
thesimplekid
7650f5f4a3 fix: relay fee in msats
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-11-03 18:30:01 -05:00
Yuki Kishimoto
a7b169c0d3 fix: send OK message for ephemeral events
4b9f13d983/01.md (L153)
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-11-03 18:19:25 -05:00
Kieran
24b1705a08 fix: value_hex tag query
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-11-03 18:17:05 -05:00
benthecarman
9d0a98f8bf docs: add line for enabling systemd service
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-11-03 18:15:25 -05:00
Greg Heartsfield
26f296f76f build: bump version to 0.8.13 2023-09-04 10:03:16 -05:00
Greg Heartsfield
c3c9b5dcd2 improvement: remove openssl dependency 2023-09-04 07:53:58 -05:00
Greg Heartsfield
da29bdd837 test: fix broken connection tests 2023-09-04 07:24:41 -05:00
Greg Heartsfield
bacb85024c improvement: update dependencies
Updating addr2line v0.20.0 -> v0.21.0
Updating aho-corasick v1.0.2 -> v1.0.5
Updating anstream v0.3.2 -> v0.5.0
Updating anstyle v1.0.1 -> v1.0.2
Updating anstyle-wincon v1.0.2 -> v2.1.0
Updating anyhow v1.0.72 -> v1.0.75
Updating async-lock v2.7.0 -> v2.8.0
Updating async-trait v0.1.72 -> v0.1.73
Updating backtrace v0.3.68 -> v0.3.69
Updating base64 v0.21.2 -> v0.21.3
Updating bitflags v2.3.3 -> v2.4.0
Updating cc v1.0.82 -> v1.0.83
Updating chrono v0.4.26 -> v0.4.28
Updating clap v4.3.21 -> v4.4.2
Updating clap_builder v4.3.21 -> v4.4.2
Updating clap_derive v4.3.12 -> v4.4.2
Updating clap_lex v0.5.0 -> v0.5.1
Updating dashmap v5.5.0 -> v5.5.3
Updating deranged v0.3.7 -> v0.3.8
Adding dirs v5.0.1
Adding dirs-sys v0.4.1
Updating errno v0.3.2 -> v0.3.3
Updating flate2 v1.0.26 -> v1.0.27
Updating gimli v0.27.3 -> v0.28.0
Updating h2 v0.3.20 -> v0.3.21
Updating hashlink v0.8.3 -> v0.8.4
Updating httpdate v1.0.2 -> v1.0.3
Removing is-terminal v0.4.9
Adding itertools v0.11.0
Updating log v0.4.19 -> v0.4.20
Updating memchr v2.5.0 -> v2.6.3
Updating object v0.31.1 -> v0.32.1
Updating openssl v0.10.56 -> v0.10.57
Updating openssl-sys v0.9.91 -> v0.9.92
Adding option-ext v0.2.0
Updating pest v2.7.2 -> v2.7.3
Updating pest_derive v2.7.2 -> v2.7.3
Updating pest_generator v2.7.2 -> v2.7.3
Updating pest_meta v2.7.2 -> v2.7.3
Updating petgraph v0.6.3 -> v0.6.4
Updating pin-project-lite v0.2.12 -> v0.2.13
Updating portable-atomic v1.4.2 -> v1.4.3
Updating quote v1.0.32 -> v1.0.33
Updating regex v1.9.3 -> v1.9.5
Updating regex-automata v0.3.6 -> v0.3.8
Updating regex-syntax v0.7.4 -> v0.7.5
Updating rustix v0.38.7 -> v0.38.11
Updating rustls v0.20.8 -> v0.20.9
Updating serde v1.0.183 -> v1.0.188
Updating serde_derive v1.0.183 -> v1.0.188
Updating serde_json v1.0.104 -> v1.0.105
Updating slab v0.4.8 -> v0.4.9
Updating sqlformat v0.2.1 -> v0.2.2
Updating syn v2.0.28 -> v2.0.31
Updating tempfile v3.7.1 -> v3.8.0
Updating thiserror v1.0.44 -> v1.0.48
Updating thiserror-impl v1.0.44 -> v1.0.48
Removing time v0.1.45
Removing time v0.3.25
Adding time v0.1.43
Adding time v0.3.28
Updating time-macros v0.2.11 -> v0.2.14
Updating tokio v1.30.0 -> v1.32.0
Updating url v2.4.0 -> v2.4.1
Updating wasi v0.10.0+wasi-snapshot-preview1 -> v0.10.2+wasi-snapshot-preview1
Updating webpki v0.22.0 -> v0.22.1
Updating which v4.4.0 -> v4.4.1
Updating windows-targets v0.48.1 -> v0.48.5
Updating windows_aarch64_gnullvm v0.48.0 -> v0.48.5
Updating windows_aarch64_msvc v0.48.0 -> v0.48.5
Updating windows_i686_gnu v0.48.0 -> v0.48.5
Updating windows_i686_msvc v0.48.0 -> v0.48.5
Updating windows_x86_64_gnu v0.48.0 -> v0.48.5
Updating windows_x86_64_gnullvm v0.48.0 -> v0.48.5
Updating windows_x86_64_msvc v0.48.0 -> v0.48.5
2023-09-04 06:52:02 -05:00
Wspsxing
7a77c459bb fix: panic on malformed signature 2023-09-04 06:48:26 -05:00
Greg Heartsfield
34c8b04926 improvement: update dependencies
Updating crates.io index
Updating anstyle-wincon v1.0.1 -> v1.0.2
Updating cc v1.0.81 -> v1.0.82
Updating clap v4.3.19 -> v4.3.21
Updating clap_builder v4.3.19 -> v4.3.21
Updating openssl v0.10.55 -> v0.10.56
Updating openssl-sys v0.9.90 -> v0.9.91
Updating pin-project v1.1.2 -> v1.1.3
Updating pin-project-internal v1.1.2 -> v1.1.3
Updating pin-project-lite v0.2.10 -> v0.2.12
Updating regex v1.9.1 -> v1.9.3
Updating regex-automata v0.3.4 -> v0.3.6
Updating rustix v0.38.6 -> v0.38.7
Updating serde v1.0.181 -> v1.0.183
Updating serde_derive v1.0.181 -> v1.0.183
Adding socket2 v0.5.3
Updating tempfile v3.7.0 -> v3.7.1
Updating tokio v1.29.1 -> v1.30.0
2023-08-09 15:00:34 -07:00
Greg Heartsfield
1032a51220 refactor: clippy suggestions 2023-08-09 14:59:39 -07:00
Václav Navrátil
79abd981e1 fix: build gRPC server code
This will allow the gRPC example to compile.

Fix for https://github.com/scsibug/nostr-rs-relay/issues/141
2023-08-09 13:24:52 -07:00
rorp
b1957ab2b1 feat(NIP-42): extend authz to NIP-44 DMs and NIP-59 gift wraps 2023-08-09 13:11:03 -07:00
Greg Heartsfield
23aa6e7313 docs: sqlite in-memory mode is false by default 2023-08-09 13:09:11 -07:00
Greg Heartsfield
fb751ba252 build: bump version to 0.8.12 2023-08-05 15:33:14 -05:00
Greg Heartsfield
7c5e851b82 fix: reset in-memory config to be false 2023-08-05 15:29:16 -05:00
Greg Heartsfield
f965c53434 build: bump version to 0.8.11 2023-08-05 11:42:14 -05:00
Greg Heartsfield
74376d94e5 improvement: upgrade multiple dependencies
Adding addr2line v0.20.0
Updating aes v0.8.2 -> v0.8.3
Adding ahash v0.8.3
Updating aho-corasick v0.7.20 -> v1.0.2
Adding allocator-api2 v0.2.16
Adding android-tzdata v0.1.1
Adding anstream v0.3.2
Adding anstyle v1.0.1
Adding anstyle-parse v0.2.1
Adding anstyle-query v1.0.0
Adding anstyle-wincon v1.0.1
Updating anyhow v1.0.69 -> v1.0.72
Updating async-channel v1.8.0 -> v1.9.0
Updating async-executor v1.5.0 -> v1.5.1
Updating async-io v1.12.0 -> v1.13.0
Updating async-lock v2.6.0 -> v2.7.0
Updating async-stream v0.3.3 -> v0.3.5
Updating async-stream-impl v0.3.3 -> v0.3.5
Updating async-task v4.3.0 -> v4.4.0
Updating async-trait v0.1.64 -> v0.1.72
Updating atomic-waker v1.1.0 -> v1.1.1
Updating axum v0.6.6 -> v0.6.20
Updating axum-core v0.3.2 -> v0.3.4
Adding backtrace v0.3.68
Updating base64 v0.21.0 -> v0.21.2
Adding bitflags v2.3.3
Updating block-buffer v0.10.3 -> v0.10.4
Updating block-padding v0.3.2 -> v0.3.3
Updating blocking v1.3.0 -> v1.3.1
Updating bumpalo v3.12.0 -> v3.13.0
Updating cc v1.0.79 -> v1.0.81
Updating chrono v0.4.23 -> v0.4.26
Updating cipher v0.4.3 -> v0.4.4
Updating clap v4.1.4 -> v4.3.19
Adding clap_builder v4.3.19
Updating clap_derive v4.1.0 -> v4.3.12
Updating clap_lex v0.3.1 -> v0.5.0
Removing codespan-reporting v0.11.1
Adding colorchoice v1.0.0
Updating concurrent-queue v2.1.0 -> v2.2.0
Updating console v0.15.5 -> v0.15.7
Updating console-api v0.4.0 -> v0.5.0
Updating console-subscriber v0.1.8 -> v0.1.10
Updating const_format v0.2.30 -> v0.2.31
Updating const_format_proc_macros v0.2.29 -> v0.2.31
Updating core-foundation-sys v0.8.3 -> v0.8.4
Updating cpufeatures v0.2.5 -> v0.2.9
Updating crossbeam-channel v0.5.6 -> v0.5.8
Updating crossbeam-utils v0.8.14 -> v0.8.16
Removing ctor v0.1.26
Removing cxx v1.0.90
Removing cxx-build v1.0.90
Removing cxxbridge-flags v1.0.90
Removing cxxbridge-macro v1.0.90
Updating dashmap v5.4.0 -> v5.5.0
Adding deranged v0.3.7
Updating digest v0.10.6 -> v0.10.7
Updating dotenvy v0.15.6 -> v0.15.7
Updating either v1.8.1 -> v1.9.0
Adding equivalent v1.0.1
Updating errno v0.2.8 -> v0.3.2
Removing fastrand v1.8.0
Adding fastrand v1.9.0
Adding fastrand v2.0.0
Updating flate2 v1.0.25 -> v1.0.26
Updating form_urlencoded v1.1.0 -> v1.2.0
Updating futures v0.3.26 -> v0.3.28
Updating futures-channel v0.3.26 -> v0.3.28
Updating futures-core v0.3.26 -> v0.3.28
Updating futures-executor v0.3.26 -> v0.3.28
Updating futures-io v0.3.26 -> v0.3.28
Updating futures-lite v1.12.0 -> v1.13.0
Updating futures-macro v0.3.26 -> v0.3.28
Updating futures-sink v0.3.26 -> v0.3.28
Updating futures-task v0.3.26 -> v0.3.28
Updating futures-util v0.3.26 -> v0.3.28
Updating generic-array v0.14.6 -> v0.14.7
Updating getrandom v0.2.8 -> v0.2.10
Adding gimli v0.27.3
Updating h2 v0.3.15 -> v0.3.20
Adding hashbrown v0.14.0
Updating hashlink v0.8.1 -> v0.8.3
Removing hermit-abi v0.2.6
Removing hermit-abi v0.3.1
Adding hermit-abi v0.3.2
Updating http v0.2.8 -> v0.2.9
Removing http-range-header v0.3.0
Updating hyper v0.14.24 -> v0.14.27
Updating iana-time-zone v0.1.53 -> v0.1.57
Updating iana-time-zone-haiku v0.1.1 -> v0.1.2
Updating idna v0.3.0 -> v0.4.0
Removing indexmap v1.9.2
Adding indexmap v1.9.3
Adding indexmap v2.0.0
Updating indicatif v0.17.3 -> v0.17.6
Updating io-lifetimes v1.0.5 -> v1.0.11
Updating is-terminal v0.4.3 -> v0.4.9
Updating itoa v1.0.5 -> v1.0.9
Updating js-sys v0.3.61 -> v0.3.64
Updating libc v0.2.139 -> v0.2.147
Removing link-cplusplus v1.0.8
Removing linux-raw-sys v0.1.4
Adding linux-raw-sys v0.3.8
Adding linux-raw-sys v0.4.5
Updating lock_api v0.4.9 -> v0.4.10
Updating log v0.4.17 -> v0.4.19
Updating matchit v0.7.0 -> v0.7.2
Updating mime v0.3.16 -> v0.3.17
Updating miniz_oxide v0.6.2 -> v0.7.1
Updating mio v0.8.5 -> v0.8.8
Updating nostr v0.18.0 -> v0.18.1
Updating nostr-rs-relay v0.8.9 -> v0.8.10
Updating num-traits v0.2.15 -> v0.2.16
Updating num_cpus v1.15.0 -> v1.16.0
Adding object v0.31.1
Updating once_cell v1.17.0 -> v1.18.0
Updating openssl v0.10.45 -> v0.10.55
Updating openssl-macros v0.1.0 -> v0.1.1
Updating openssl-sys v0.9.80 -> v0.9.90
Removing os_str_bytes v6.4.1
Updating parking v2.0.0 -> v2.1.0
Updating parking_lot_core v0.9.7 -> v0.9.8
Updating paste v1.0.11 -> v1.0.14
Updating percent-encoding v2.2.0 -> v2.3.0
Updating pest v2.5.5 -> v2.7.2
Updating pest_derive v2.5.5 -> v2.7.2
Updating pest_generator v2.5.5 -> v2.7.2
Updating pest_meta v2.5.5 -> v2.7.2
Updating pin-project v1.0.12 -> v1.1.2
Updating pin-project-internal v1.0.12 -> v1.1.2
Updating pin-project-lite v0.2.9 -> v0.2.10
Updating pkg-config v0.3.26 -> v0.3.27
Updating polling v2.5.2 -> v2.8.0
Updating portable-atomic v0.3.19 -> v1.4.2
Updating prettyplease v0.1.23 -> v0.1.25
Removing proc-macro-error v1.0.4
Removing proc-macro-error-attr v1.0.4
Updating proc-macro2 v1.0.51 -> v1.0.66
Updating prost v0.11.6 -> v0.11.9
Updating prost-build v0.11.6 -> v0.11.9
Updating prost-derive v0.11.6 -> v0.11.9
Updating prost-types v0.11.6 -> v0.11.9
Updating quote v1.0.23 -> v1.0.32
Updating raw-cpuid v10.6.1 -> v10.7.0
Adding redox_syscall v0.3.5
Updating regex v1.7.1 -> v1.9.1
Adding regex-automata v0.3.4
Removing regex-syntax v0.6.28
Adding regex-syntax v0.6.29
Adding regex-syntax v0.7.4
Removing remove_dir_all v0.5.3
Adding rustc-demangle v0.1.23
Removing rustix v0.36.8
Adding rustix v0.37.23
Adding rustix v0.38.6
Updating rustls-pemfile v1.0.2 -> v1.0.3
Updating rustversion v1.0.11 -> v1.0.14
Updating ryu v1.0.12 -> v1.0.15
Updating schannel v0.1.21 -> v0.1.22
Updating scheduled-thread-pool v0.2.6 -> v0.2.7
Updating scopeguard v1.1.0 -> v1.2.0
Removing scratch v1.0.3
Updating security-framework v2.8.2 -> v2.9.2
Updating security-framework-sys v2.8.0 -> v2.9.1
Updating serde v1.0.152 -> v1.0.181
Updating serde_derive v1.0.152 -> v1.0.181
Updating serde_json v1.0.93 -> v1.0.104
Updating sha2 v0.10.6 -> v0.10.7
Updating slab v0.4.7 -> v0.4.8
Updating smallvec v1.10.0 -> v1.11.0
Updating socket2 v0.4.7 -> v0.4.9
Updating sqlx v0.6.2 -> v0.6.3
Updating sqlx-core v0.6.2 -> v0.6.3
Updating sqlx-macros v0.6.2 -> v0.6.3
Updating sqlx-rt v0.6.2 -> v0.6.3
Updating stringprep v0.1.2 -> v0.1.3
Updating subtle v2.4.1 -> v2.5.0
Removing syn v1.0.107
Adding syn v1.0.109
Adding syn v2.0.28
Updating tempfile v3.3.0 -> v3.7.0
Removing termcolor v1.2.0
Updating thiserror v1.0.38 -> v1.0.44
Updating thiserror-impl v1.0.38 -> v1.0.44
Updating tikv-jemalloc-sys v0.5.3+5.3.0-patched -> v0.5.4+5.3.0-patched
Updating tikv-jemallocator v0.5.0 -> v0.5.4
Updating time v0.3.20 -> v0.3.25
Updating time-core v0.1.0 -> v0.1.1
Updating time-macros v0.2.8 -> v0.2.11
Updating tokio v1.25.0 -> v1.29.1
Updating tokio-macros v1.8.2 -> v2.1.0
Updating tokio-stream v0.1.11 -> v0.1.14
Updating tokio-util v0.7.7 -> v0.7.8
Adding tonic v0.9.2
Removing tower-http v0.3.5
Updating tracing-attributes v0.1.23 -> v0.1.26
Updating tracing-core v0.1.30 -> v0.1.31
Updating tracing-subscriber v0.3.16 -> v0.3.17
Updating ucd-trie v0.1.5 -> v0.1.6
Updating unicode-bidi v0.3.10 -> v0.3.13
Updating unicode-ident v1.0.6 -> v1.0.11
Updating url v2.3.1 -> v2.4.0
Adding utf8parse v0.2.1
Updating uuid v1.3.0 -> v1.4.1
Updating value-bag v1.0.0-alpha.9 -> v1.4.1
Updating want v0.3.0 -> v0.3.1
Updating wasm-bindgen v0.2.84 -> v0.2.87
Updating wasm-bindgen-backend v0.2.84 -> v0.2.87
Updating wasm-bindgen-futures v0.4.34 -> v0.4.37
Updating wasm-bindgen-macro v0.2.84 -> v0.2.87
Updating wasm-bindgen-macro-support v0.2.84 -> v0.2.87
Updating wasm-bindgen-shared v0.2.84 -> v0.2.87
Updating web-sys v0.3.61 -> v0.3.64
Removing wepoll-ffi v0.1.2
Updating whoami v1.3.0 -> v1.4.1
Removing winapi-util v0.1.5
Adding windows v0.48.0
Updating windows-sys v0.42.0 -> v0.48.0
Removing windows-targets v0.42.1
Adding windows-targets v0.42.2
Adding windows-targets v0.48.1
Removing windows_aarch64_gnullvm v0.42.1
Adding windows_aarch64_gnullvm v0.42.2
Adding windows_aarch64_gnullvm v0.48.0
Removing windows_aarch64_msvc v0.42.1
Adding windows_aarch64_msvc v0.42.2
Adding windows_aarch64_msvc v0.48.0
Removing windows_i686_gnu v0.42.1
Adding windows_i686_gnu v0.42.2
Adding windows_i686_gnu v0.48.0
Removing windows_i686_msvc v0.42.1
Adding windows_i686_msvc v0.42.2
Adding windows_i686_msvc v0.48.0
Removing windows_x86_64_gnu v0.42.1
Adding windows_x86_64_gnu v0.42.2
Adding windows_x86_64_gnu v0.48.0
Removing windows_x86_64_gnullvm v0.42.1
Adding windows_x86_64_gnullvm v0.42.2
Adding windows_x86_64_gnullvm v0.48.0
Removing windows_x86_64_msvc v0.42.1
Adding windows_x86_64_msvc v0.42.2
Adding windows_x86_64_msvc v0.48.0
2023-08-05 11:37:23 -05:00
Greg Heartsfield
21d1bbcfe3 build: bump version to 0.8.10 2023-08-05 11:18:12 -05:00
Greg Heartsfield
c3e13af9e3 test: wip integration test for event publishing 2023-08-05 11:16:11 -05:00
Greg Heartsfield
05f70112e8 improvement: reduce logging for hex parse failures in events 2023-08-05 07:13:53 -05:00
Greg Heartsfield
eab522dc39 feat: warn or exit on config file parse errors
The relay will now fail to start if an invalid config file is
explicitly provided.  If the file was read implicitly from the current
directory, a warning will be provided, but the relay will still start up.
2023-07-29 08:33:27 -05:00
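A minimal sketch of that startup rule, assuming illustrative names (`Settings`, `load`) rather than the relay's actual config types:

```rust
use serde::Deserialize;

#[derive(Deserialize, Default)]
struct Settings {} // relay options elided

// An explicitly provided file must parse; the implicit ./config.toml
// only produces a warning and the relay falls back to defaults.
fn load(explicit: Option<&str>) -> Settings {
    let path = explicit.unwrap_or("config.toml");
    let parsed = std::fs::read_to_string(path)
        .map_err(|e| e.to_string())
        .and_then(|s| toml::from_str(&s).map_err(|e| e.to_string()));
    match parsed {
        Ok(cfg) => cfg,
        Err(e) if explicit.is_some() => panic!("invalid config file {path}: {e}"),
        Err(e) => {
            eprintln!("warning: {path}: {e}; continuing with defaults");
            Settings::default()
        }
    }
}
```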
Iru Sensei
edf7af1573 feat: verify config file exists and can be read 2023-07-29 08:32:55 -05:00
Václav Navrátil
34f497a650 docs: example SQL to delete old events
Added an example SQL query to delete events older than 30 days.
2023-07-29 06:45:17 -05:00
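A hedged version of that kind of maintenance query, run through rusqlite (the `event` table and `created_at` column are assumed from the SQLite schema):

```rust
use rusqlite::Connection;

// Delete events whose created_at (unix seconds) is older than 30 days.
fn purge_old_events(conn: &Connection) -> rusqlite::Result<usize> {
    conn.execute(
        "DELETE FROM event
         WHERE created_at < strftime('%s','now') - 30*24*60*60",
        [],
    )
}
```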
Greg Heartsfield
4adad4c3a9 fix: update since/until semantics for subscriptions 2023-07-16 11:42:55 -05:00
Václav Navrátil
70dfcb6a04 feat(NIP-11): relay_icon option added 2023-07-16 11:42:41 -05:00
jiftechnify
c50e10aa21 fix: keep up with the latest specs for since/until filter 2023-07-15 11:12:38 -05:00
Greg Heartsfield
9e22776227 refactor: whitespace 2023-07-03 10:35:51 -05:00
Greg Heartsfield
dad6911807 refactor: clippy suggestions 2023-07-03 10:31:22 -05:00
thesimplekid
ddc58a2f1c feat: config sending dms on pay to relay signup 2023-07-03 09:51:28 -05:00
thesimplekid
1131c1986e fix: lnbits expired invoice for existing user 2023-07-03 09:51:07 -05:00
thesimplekid
06fcaad9a1 chore: typos 2023-07-03 09:49:40 -05:00
Greg Heartsfield
087b68128f fix: ensure startup SQL runs, even with zero min writers 2023-06-23 10:38:06 -05:00
Greg Heartsfield
4647476622 improvement: default to logging on stdout 2023-06-23 10:34:25 -05:00
Greg Heartsfield
7a72e588ea refactor: reorder imports 2023-06-23 10:03:08 -05:00
Jamin M
9237eed735 feat: roll over logs daily 2023-06-23 10:03:01 -05:00
Jamin M
f4beb884b3 feat: allow logging output to file 2023-06-23 10:02:49 -05:00
Yuval Adam
73285683a3 docs: add database maintenance example queries 2023-06-23 09:55:05 -05:00
rorp
2f10271903 improvement(NIP-42): use 'restricted:' prefix for auth error msgs 2023-06-23 09:52:50 -05:00
thesimplekid
a34516628b docs: typo in build-essential package name 2023-06-23 09:48:43 -05:00
Greg Heartsfield
eba7a32615 perf: reduce SQLite connection count and idle lifetime
On lightly loaded relays, we free up memory faster by letting idle
connections be reclaimed in 10 seconds instead of the default 10
minutes.  This also sets the minimum to zero connections, instead of
always trying to hold one open.
2023-05-07 19:38:18 -05:00
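A minimal sketch of that pool tuning with r2d2 (the numbers come from the commit message; the manager setup is illustrative):

```rust
use std::time::Duration;
use r2d2_sqlite::SqliteConnectionManager;

fn build_pool(db_path: &str) -> r2d2::Pool<SqliteConnectionManager> {
    let manager = SqliteConnectionManager::file(db_path);
    r2d2::Pool::builder()
        .min_idle(Some(0)) // hold zero connections when idle
        .idle_timeout(Some(Duration::from_secs(10))) // reclaim after 10s, not 10min
        .build(manager)
        .expect("failed to build SQLite pool")
}
```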
Greg Heartsfield
4d746fad85 docs: helpful ubuntu packages for building 2023-05-07 19:33:10 -05:00
Greg Heartsfield
0582a891cc perf: switch to jemalloc allocator 2023-05-07 19:32:50 -05:00
Greg Heartsfield
2bcddf8bbf perf: disable sqlite mmap to reduce memory pressure 2023-05-06 15:40:56 -05:00
Greg Heartsfield
1595ec783d docs: allow host header prefix matching, required for Damus compatibility 2023-05-06 14:43:30 -05:00
Greg Heartsfield
a2d1d78e23 docs: reformatting 2023-05-06 14:42:59 -05:00
Greg Heartsfield
04db2203bb perf: use standard allocator, limit sqlite mmap to 4GB
This is an experimental change to see if we can reduce memory usage
with large SQLite databases.  If successful, we'll do this again and
further reduce the database mmap size.

This will cause greater use of the page cache, but that is more easily
reclaimed by the kernel, and should reduce memory pressure, as well as
making it clearer how much memory the application is actually using
for connections, subscriptions, etc.
2023-05-03 07:22:44 -05:00
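A hedged sketch of that per-connection mmap cap (the pragma value is in bytes; the later commit that disables mmap entirely would set it to 0):

```rust
use rusqlite::Connection;

// Limit SQLite's memory-mapped I/O to 4GB for this connection.
fn limit_mmap(conn: &Connection) -> rusqlite::Result<()> {
    conn.pragma_update(None, "mmap_size", 4_u64 * 1024 * 1024 * 1024)
}
```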
Greg Heartsfield
1c1b1a1802 build: upgrade checkout action for github ci 2023-04-30 11:13:03 -05:00
Greg Heartsfield
993fec4eed improvement: document pg connection_write config 2023-04-30 10:10:06 -05:00
Kieran
beffeb4d86 improvement: add a configurable postgres write conn string
This adds a new configurable connection string for postgres writes.
2023-04-30 10:02:10 -05:00
Petr Kracik
5135f3b007 improvement: use appropriate paths for systemd example 2023-04-30 09:55:07 -05:00
Greg Heartsfield
ba0b50bc9c build: bump version to 0.8.9 2023-04-22 13:47:08 -05:00
0xtr
c65c64275e docs: add systemd service file and guide 2023-04-19 18:37:16 -05:00
Greg Heartsfield
80c459c36c improvement: switch to jemalloc allocator 2023-04-06 18:33:30 -05:00
rorp
8e4e2d824b feat(NIP-42): limit access to kind 4 DMs 2023-03-03 09:04:35 -06:00
thesimplekid
c13961a5c4 fix: nip05 for postgres 2023-03-03 08:57:23 -06:00
thesimplekid
05b08c7916 feat: join via nip-07 2023-03-01 18:04:06 -06:00
Greg Heartsfield
9a141dc950 improvement: disable HTTP request logging 2023-02-25 15:57:01 -06:00
Greg Heartsfield
8c9170d4e3 fix: persist database version for v18 migration 2023-02-25 15:55:00 -06:00
Greg Heartsfield
5508020777 improvement: configure pay-to-relay defaults and comment block 2023-02-25 15:53:32 -06:00
Greg Heartsfield
43021910ea improvement: disable pay-to-relay by default 2023-02-25 15:41:30 -06:00
thesimplekid
c0158af18b feat(NIP-111): pay to relay (experimental) 2023-02-25 15:38:26 -06:00
Rene Honig
164603dedd docs: add Traefik to reverse proxy doc 2023-02-25 14:50:58 -06:00
Greg Heartsfield
c1c25a22f5 refactor: format 2023-02-25 14:49:35 -06:00
thesimplekid
6df92f9580 refactor: format
cargo fmt
2023-02-25 14:46:49 -06:00
Greg Heartsfield
440217e1ee docs: add documented support for NIP-40 2023-02-25 14:29:52 -06:00
Greg Heartsfield
96359aafab docs: better example of kinds for allowlist 2023-02-25 14:05:11 -06:00
Mike White
5414629298 feat: add event kind allowlist 2023-02-25 14:00:01 -06:00
Greg Heartsfield
2be75e18fb build: bump version to 0.8.8 2023-02-21 08:16:40 -06:00
Greg Heartsfield
5f6ff4c2b7 fix: in-memory SQLite DB correctly shares memory between connections
fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/73#event-227131
2023-02-21 08:14:19 -06:00
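A minimal sketch of the underlying SQLite mechanism, assuming the standard shared-cache URI form (the relay's actual connection setup differs): a named `mode=memory` URI with `cache=shared` makes every connection open the same in-memory database instead of a private empty one.

```rust
use rusqlite::{Connection, OpenFlags, Result};

fn open_shared_memory() -> Result<(Connection, Connection)> {
    let flags = OpenFlags::SQLITE_OPEN_READ_WRITE
        | OpenFlags::SQLITE_OPEN_CREATE
        | OpenFlags::SQLITE_OPEN_URI; // required for file: URIs
    let uri = "file:relaymem?mode=memory&cache=shared";
    // Both handles see the same tables and rows.
    Ok((
        Connection::open_with_flags(uri, flags)?,
        Connection::open_with_flags(uri, flags)?,
    ))
}
```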
Greg Heartsfield
df411c24fb fix: block other writers during checkpoint to eliminate DB lock errors 2023-02-20 16:50:44 -06:00
Greg Heartsfield
39f9984c4f build: bump version to 0.8.7 2023-02-17 21:05:36 -06:00
Greg Heartsfield
9d55731073 fix: Postgres SQL generation for expiring events 2023-02-17 21:04:30 -06:00
Greg Heartsfield
5638f70d66 fix: set SQL tracing back to appropriate level 2023-02-17 20:50:19 -06:00
Greg Heartsfield
98a08d054a improvement: advertise support for NIP-42 in relay info 2023-02-17 14:02:49 -06:00
Greg Heartsfield
0ef7d618a8 build: bump version to 0.8.6 2023-02-17 13:59:07 -06:00
Greg Heartsfield
bf06bea808 feat(NIP-40): postgres support for event expiration 2023-02-17 13:25:56 -06:00
Greg Heartsfield
e5ca8c2a86 improvement: run expired event cleanup every 10 minutes 2023-02-17 11:22:00 -06:00
Greg Heartsfield
8ea63f0b27 feat(NIP-40): sqlite support for event expiration 2023-02-17 11:15:06 -06:00
Greg Heartsfield
3229e4192f feat: publish favicon.ico 2023-02-16 18:03:28 -06:00
0xtr
7fd9b55e70 fix: typo in sqlite_migration.rs 2023-02-15 18:52:49 -06:00
rorp
5cecfba319 feat(NIP-42): pubkey authentication
Configurable in `config.toml`.  Limited functionality, but this does
send metadata to gRPC for event authorization.

fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/66
2023-02-15 18:51:40 -06:00
Greg Heartsfield
d0f57aea21 improvement(NIP-40): functions for checking event expiration 2023-02-15 18:47:27 -06:00
Yuval Adam
40abd6858e docs: cleanup location of documentation 2023-02-15 18:43:22 -06:00
Greg Heartsfield
136e41d234 fix: retry event writes if DB is busy 2023-02-15 18:38:34 -06:00
Yuval Adam
35a1973a46 fix: allow older versions of protobuf-compiler to work
Add --experimental_allow_proto3_optional protoc arg in build configs

fixes https://github.com/scsibug/nostr-rs-relay/issues/77
2023-02-14 16:59:41 -06:00
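A hedged build.rs sketch of passing that flag through tonic-build (the proto path is illustrative):

```rust
fn main() -> Result<(), Box<dyn std::error::Error>> {
    tonic_build::configure()
        // Lets older protobuf-compiler versions accept proto3 optional fields.
        .protoc_arg("--experimental_allow_proto3_optional")
        .compile(&["proto/nauthz.proto"], &["proto"])?;
    Ok(())
}
```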
Kieran
1daa25600d fix: postgres tag inserts 2023-02-14 06:33:01 -06:00
Greg Heartsfield
692925942a build: bump version to 0.8.5 2023-02-13 17:53:33 -06:00
Greg Heartsfield
84afd4b64e refactor: whitespace 2023-02-13 17:52:00 -06:00
Greg Heartsfield
46160bb1f9 fix: correct name of gRPC configuration in toml 2023-02-13 17:30:26 -06:00
Greg Heartsfield
2fc9168a38 fix: SQL error with parameterized replaceable events 2023-02-13 17:10:42 -06:00
Greg Heartsfield
01d0d44868 build: bump version to 0.8.4 2023-02-13 09:34:30 -06:00
Greg Heartsfield
93f6337fda fix: upgrade docker image to include OpenSSL 3 2023-02-13 09:33:14 -06:00
Greg Heartsfield
f3a42712a6 build: bump version to 0.8.3 2023-02-13 08:08:28 -06:00
Greg Heartsfield
27361d064a improvement: upgrade multiple dependencies
Updating anyhow v1.0.68 -> v1.0.69
Updating axum v0.6.4 -> v0.6.6
Updating cxx v1.0.89 -> v1.0.90
Updating cxx-build v1.0.89 -> v1.0.90
Updating cxxbridge-flags v1.0.89 -> v1.0.90
Updating cxxbridge-macro v1.0.89 -> v1.0.90
Adding hermit-abi v0.3.1
Updating is-terminal v0.4.2 -> v0.4.3
Updating pest v2.5.4 -> v2.5.5
Updating pest_derive v2.5.4 -> v2.5.5
Updating pest_generator v2.5.4 -> v2.5.5
Updating pest_meta v2.5.4 -> v2.5.5
Updating proc-macro2 v1.0.50 -> v1.0.51
Updating raw-cpuid v10.6.0 -> v10.6.1
Updating rustix v0.36.7 -> v0.36.8
Updating serde_json v1.0.91 -> v1.0.93
Updating signal-hook-registry v1.4.0 -> v1.4.1
Updating thread_local v1.1.4 -> v1.1.7
Updating tinyvec_macros v0.1.0 -> v0.1.1
Updating tokio-native-tls v0.3.0 -> v0.3.1
Updating tokio-util v0.7.4 -> v0.7.7
2023-02-13 07:57:14 -06:00
Greg Heartsfield
3bafb611e5 build: install packages with sudo for github ci 2023-02-13 07:50:48 -06:00
Greg Heartsfield
b960ab70de build: add protobuf compiler to github ci workflow 2023-02-13 07:48:09 -06:00
Greg Heartsfield
15e2f097aa improvement: advise operator this upgrade may take a minute 2023-02-13 07:37:13 -06:00
Greg Heartsfield
185f9e7abb feat: improved query performance when looking for deletion events (improves event insert time) 2023-02-12 15:43:22 -06:00
Greg Heartsfield
f44dae6ac9 fix: use correct start time for logging SQL generation 2023-02-12 15:00:50 -06:00
Greg Heartsfield
abc356c17d perf(sqlite): index tags with their kind/created_at fields
This updates the DB schema to remove the distinction between hex and
non-hex tag values, for simplicity.  The space savings did not seem to
be worth the extra complexity.

The SQLite tags table is denormalized to duplicate kind/created_at to
improve the ability of tag indexes to filter data.
2023-02-12 14:33:40 -06:00
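A hedged sketch of the denormalized shape (table, column, and index names are illustrative, not the actual migration): duplicating `kind` and `created_at` onto each tag row lets one composite index answer tag + kind + time-range filters without joining back to the event table first.

```rust
use rusqlite::Connection;

fn create_denormalized_tags(conn: &Connection) -> rusqlite::Result<()> {
    conn.execute_batch(
        "CREATE TABLE tag (
             event_id INTEGER NOT NULL REFERENCES event(id) ON DELETE CASCADE,
             name TEXT,
             value TEXT,
             kind INTEGER,       -- duplicated from event
             created_at INTEGER  -- duplicated from event
         );
         CREATE INDEX tag_covering_index ON tag(name, value, kind, created_at);",
    )
}
```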
Greg Heartsfield
81f8256c37 fix: container builds support protobuf compilation 2023-02-11 14:30:42 -06:00
Greg Heartsfield
b3db2bd081 fix: protobuf compiler not needed in runtime container 2023-02-11 13:57:53 -06:00
Greg Heartsfield
d31e974d56 fix: add protobuf-compiler for Docker and CI builds 2023-02-11 13:56:15 -06:00
Greg Heartsfield
36eaf9fea5 improvement: make comments match code for nauthz example 2023-02-11 13:36:10 -06:00
Greg Heartsfield
a16c4e698a feat: gRPC authorization for events
closes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/46
2023-02-11 13:26:08 -06:00
Greg Heartsfield
e63d179424 fix: prevent loop when nip05 metadata channel closes 2023-02-11 13:26:08 -06:00
rorp
28b7b83a6e improvement: make config file location configurable via CLI args 2023-02-08 07:59:26 -06:00
Greg Heartsfield
2e42b1b86e improvement: log source IP for persisted events 2023-02-06 17:15:27 -06:00
Naoki Ikeguchi
bd07a11f50 refactor: Fix clippy warnings 2023-02-06 07:29:45 -06:00
Greg Heartsfield
bc4b45d4b8 docs: update DB maintenance for v0.8.x 2023-02-06 07:07:23 -06:00
thesimplekid
1ca5d652de format: postgres_migrations 2023-02-06 06:44:57 -06:00
thesimplekid
d7cceab8fc fix: tag table does not have a unique constraint
`cargo fmt` on the document.
2023-02-06 06:44:57 -06:00
Greg Heartsfield
2805a96e5b docs: nginx timeouts
suggested by Michael Dilger;
ref: https://snort.social/e/note15jtrt8zsrvckyv6hggmanwk43p50gvmxe30s62x9tt6x9hyruzaq6fca44
2023-02-05 17:17:13 -06:00
Greg Heartsfield
ac14a0759f docs: clarify wording around subscription limits 2023-02-03 13:08:31 -06:00
Greg Heartsfield
cdd4e5949f fix: correctly log SQL generation time 2023-02-03 10:39:41 -06:00
Greg Heartsfield
5999009779 improvement: increase connection cache size 2023-02-02 18:34:30 -06:00
Greg Heartsfield
e36c791c53 improvement: prevent spilling temp indexes to disk 2023-02-02 18:15:14 -06:00
Greg Heartsfield
d95adbcb3d build: bump version to 0.8.2 2023-02-02 16:21:45 -06:00
Greg Heartsfield
509736c56d improvement: update multiple dependencies
Updating cxx v1.0.88 -> v1.0.89
Updating cxx-build v1.0.88 -> v1.0.89
Updating cxxbridge-flags v1.0.88 -> v1.0.89
Updating cxxbridge-macro v1.0.88 -> v1.0.89
Updating heck v0.4.0 -> v0.4.1
Updating hyper v0.14.23 -> v0.14.24
Updating io-lifetimes v1.0.4 -> v1.0.5
Updating js-sys v0.3.60 -> v0.3.61
Updating parking_lot_core v0.9.6 -> v0.9.7
Updating sync_wrapper v0.1.1 -> v0.1.2
Updating wasm-bindgen v0.2.83 -> v0.2.84
Updating wasm-bindgen-backend v0.2.83 -> v0.2.84
Updating wasm-bindgen-futures v0.4.33 -> v0.4.34
Updating wasm-bindgen-macro v0.2.83 -> v0.2.84
Updating wasm-bindgen-macro-support v0.2.83 -> v0.2.84
Updating wasm-bindgen-shared v0.2.83 -> v0.2.84
Updating web-sys v0.3.60 -> v0.3.61
2023-02-02 16:12:49 -06:00
Greg Heartsfield
8004ea9b44 fix(NIP-33): only delete older events with matching 'd' tags 2023-02-02 16:09:17 -06:00
Greg Heartsfield
866c239cc9 improvement: simplify SQL queries for tags 2023-02-02 12:24:10 -06:00
Greg Heartsfield
6012b57e95 improvement: log connection details at INFO level 2023-02-02 11:55:41 -06:00
Greg Heartsfield
559541b160 build: bump version to 0.8.1 2023-02-01 18:16:08 -06:00
Greg Heartsfield
facaed7805 improvement: guidance for subscription limits 2023-02-01 18:09:30 -06:00
Greg Heartsfield
ba4fcd072a improvement: allow queries to be cancelled earlier (before SQL execution) 2023-02-01 18:09:30 -06:00
Greg Heartsfield
2b79099cfe improvement: drop slow readers more quickly 2023-02-01 18:09:30 -06:00
Greg Heartsfield
eb1d2d717d improvement: log sleeps due to full query_tx 2023-02-01 18:09:30 -06:00
Greg Heartsfield
e5e03d4378 improvement: log slow filter query time 2023-02-01 18:09:30 -06:00
Greg Heartsfield
c377b136aa improvement: prometheus metric for db connections (sqlite) 2023-02-01 18:09:30 -06:00
Greg Heartsfield
bca5614a82 perf: hold database handle through all filters when querying 2023-02-01 18:09:30 -06:00
Greg Heartsfield
f7550b4c61 improvement: more precise log message 2023-02-01 18:09:30 -06:00
Greg Heartsfield
1623bacd0d improvement(NIP-33): advertise support for parameterized replaceable events 2023-02-01 18:09:27 -06:00
Greg Heartsfield
2bbde8ad09 build: upgrade Rust to 1.67.0 2023-02-01 08:02:50 -06:00
Greg Heartsfield
a42004c30c improvement: update multiple dependencies
Updating async-trait v0.1.61 -> v0.1.64
Updating axum v0.6.2 -> v0.6.4
Updating axum-core v0.3.1 -> v0.3.2
Updating bumpalo v3.11.1 -> v3.12.0
Updating bytes v1.3.0 -> v1.4.0
Updating cc v1.0.78 -> v1.0.79
Updating clap v4.1.1 -> v4.1.4
Updating crc v3.0.0 -> v3.0.1
Updating cxx v1.0.86 -> v1.0.88
Updating cxx-build v1.0.86 -> v1.0.88
Updating cxxbridge-flags v1.0.86 -> v1.0.88
Updating cxxbridge-macro v1.0.86 -> v1.0.88
Updating either v1.8.0 -> v1.8.1
Updating futures v0.3.25 -> v0.3.26
Updating futures-channel v0.3.25 -> v0.3.26
Updating futures-core v0.3.25 -> v0.3.26
Updating futures-executor v0.3.25 -> v0.3.26
Updating futures-io v0.3.25 -> v0.3.26
Updating futures-macro v0.3.25 -> v0.3.26
Updating futures-sink v0.3.25 -> v0.3.26
Updating futures-task v0.3.25 -> v0.3.26
Updating futures-util v0.3.25 -> v0.3.26
Updating pest v2.5.3 -> v2.5.4
Updating pest_derive v2.5.3 -> v2.5.4
Updating pest_generator v2.5.3 -> v2.5.4
Updating pest_meta v2.5.3 -> v2.5.4
Updating proc-macro2 v1.0.49 -> v1.0.50
Updating rustix v0.36.6 -> v0.36.7
Updating security-framework v2.7.0 -> v2.8.2
Updating security-framework-sys v2.6.1 -> v2.8.0
Updating tokio v1.24.1 -> v1.25.0
Updating toml v0.5.10 -> v0.5.11
Updating unicode-bidi v0.3.8 -> v0.3.10
Updating unicode-segmentation v1.10.0 -> v1.10.1
Updating uuid v1.2.2 -> v1.3.0
2023-02-01 07:54:21 -06:00
Greg Heartsfield
9dd97908cf build: bump version to 0.8.0 2023-02-01 07:52:24 -06:00
Greg Heartsfield
ab749e9cf0 improvement: log mixed string/blob tag queries 2023-02-01 07:49:46 -06:00
Greg Heartsfield
1820e9c689 perf: separate out blob and string tag queries 2023-02-01 07:13:29 -06:00
Greg Heartsfield
2d3a35fe30 perf: force event hash index if filter uses ids 2023-02-01 06:46:35 -06:00
Greg Heartsfield
9c77b06f79 improvement: dedupe filters in a REQ 2023-01-31 18:09:43 -06:00
Greg Heartsfield
c8e8b71b91 fix: use accurate timer for slow queries, and use 250ms as cutoff 2023-01-31 18:09:43 -06:00
Greg Heartsfield
6d57adef73 improvement: log filter in a reusable format for slow queries 2023-01-31 18:09:43 -06:00
Greg Heartsfield
111eb4a10c perf: prevent sqlite readers from capturing worker thread pool and impacting writer latency 2023-01-31 18:09:23 -06:00
Greg Heartsfield
214f152c5d improvement: provide reason for abort in prometheus metric 2023-01-30 18:40:47 -06:00
Greg Heartsfield
3fcaf97a15 improvement: move sqlite connection acquisition into blocking thread 2023-01-30 18:02:40 -06:00
Greg Heartsfield
cec501b37f improvement: start timing for each new filter execution 2023-01-30 18:02:40 -06:00
Greg Heartsfield
2557c7f69c improvement: run filters as separate queries, to reduce complexity on SQLite query planner 2023-01-30 18:02:40 -06:00
Greg Heartsfield
3979a94726 improvement: do not force query to use index when limit is specified 2023-01-30 18:02:40 -06:00
Greg Heartsfield
71bdbfb425 improvement: query and exit early for events that get immediately replaced 2023-01-30 18:02:40 -06:00
Greg Heartsfield
b6798f96b6 improvement: add prometheus metrics, renaming others 2023-01-30 18:02:28 -06:00
w3irdrobot
c1152ce430 improvement(NIP-19): identify and parse NIP-19 addresses 2023-01-29 18:55:30 -06:00
thesimplekid
6f1a4e7d76 fix: postgres create index before column exists 2023-01-29 18:32:42 -06:00
Greg Heartsfield
1804bee912 feat(NIP-33): parameterized replaceable events for postgres 2023-01-29 18:30:08 -06:00
Greg Heartsfield
34db91940c improvement: prometheus metrics for aborted queries 2023-01-28 16:05:58 -06:00
Greg Heartsfield
0859e535ed improvement: remove origin label from connections metric
The origin header is controlled by clients, and there is no expiration
of any values that appear.  We would need to whitelist a set of known
origins in order to track this without giving someone the ability to
exhaust memory.
2023-01-28 15:46:26 -06:00
Greg Heartsfield
bdd4e43df4 improvement: show errors when writing new sqlite db
Build the sqlite writer pool first, which will provide a better error
message in the event the database is not writeable or readable.
2023-01-28 14:02:20 -06:00
Greg Heartsfield
dfa6985f44 docs: postgresql and NIP-33 added to README 2023-01-27 20:25:24 -06:00
Greg Heartsfield
57e1b53c13 feat: postgres migration schema v2
This primarily deals with correctly handling tag values.
2023-01-27 20:13:47 -06:00
Greg Heartsfield
53f83aa923 improvement: delete, do not hide, replaceable events 2023-01-24 08:04:42 -06:00
Greg Heartsfield
34a8f99d61 build: bump release to RC 1 2023-01-24 08:04:42 -06:00
Greg Heartsfield
c8f7420334 feat(NIP-33): parameterized replaceable events 2023-01-24 08:04:42 -06:00
Greg Heartsfield
e2869e8fad fix(NIP-16): do not replace events unless they are newer 2023-01-24 08:04:42 -06:00
Greg Heartsfield
5c07b2eca5 refactor: event is_ephemeral method 2023-01-24 08:04:42 -06:00
Greg Heartsfield
25752abe6b fix: run postgres migration on startup 2023-01-24 08:04:37 -06:00
Kieran
16f6e974c8 feat: add support for PostgreSQL as a backend repository 2023-01-22 16:26:54 -06:00
Rasmus Schlunsen
744d467a28 build: add github CI and badge 2023-01-22 15:50:32 -06:00
Greg Heartsfield
b094fbcabd fix: integration tests working 2023-01-22 11:10:21 -06:00
Kieran
4121c872bc feat: prometheus metrics
Prometheus metrics exposed at /metrics
2023-01-22 11:08:12 -06:00
Greg Heartsfield
6489e685ab refactor: reformat and remove tabs 2023-01-22 10:06:44 -06:00
Greg Heartsfield
6800c2e39d improvement: add NostrRepo trait, with sqlite implementation
This is inspired by the work of
v0l (https://github.com/v0l/nostr-rs-relay/).

A new trait abstracts the storage layer with an async API.  Rusqlite
is still used with worker threads, but this allows for Postgresql or
other backends to be used.

There may be bugs, this has not been rigorously tested.
2023-01-22 09:49:49 -06:00
Greg Heartsfield
e996d4c009 improvement: default to having an event creation rate limit (5/sec) 2023-01-20 11:10:43 -06:00
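A sketch of what such a default could look like in config.toml; the section and key name here are assumptions based only on the commit summary:

    [limits]
    # Events accepted per second, per connection (a hypothetical 0 would disable the limit)
    messages_per_sec = 5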
Paul Rollo
2331c881d7 docs: typo in database-maintenance.md
Add a missing `"`
2023-01-20 10:49:03 -06:00
Greg Heartsfield
585fdd3884 fix: use data_dir from config.toml if present
fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/64
2023-01-16 17:21:12 -06:00
Greg Heartsfield
cf3e67500f build: bump version to 0.7.17 2023-01-15 15:48:39 -06:00
Greg Heartsfield
1d19442cfd improvement: upgrade multiple dependencies
Updating crates.io index
Updating async-trait v0.1.60 -> v0.1.61
Updating axum v0.6.1 -> v0.6.2
Updating axum-core v0.3.0 -> v0.3.1
Updating clap v4.0.32 -> v4.1.1
Updating clap_derive v4.0.21 -> v4.1.0
Updating clap_lex v0.3.0 -> v0.3.1
Updating cxx v1.0.85 -> v1.0.86
Updating cxx-build v1.0.85 -> v1.0.86
Updating cxxbridge-flags v1.0.85 -> v1.0.86
Updating cxxbridge-macro v1.0.85 -> v1.0.86
Updating io-lifetimes v1.0.3 -> v1.0.4
Updating nom v7.1.2 -> v7.1.3
Updating parking_lot_core v0.9.5 -> v0.9.6
Updating pest v2.5.2 -> v2.5.3
Updating pest_derive v2.5.2 -> v2.5.3
Updating pest_generator v2.5.2 -> v2.5.3
Updating pest_meta v2.5.2 -> v2.5.3
Updating prost v0.11.5 -> v0.11.6
Updating prost-derive v0.11.5 -> v0.11.6
Updating prost-types v0.11.5 -> v0.11.6
Updating regex v1.7.0 -> v1.7.1
Updating schannel v0.1.20 -> v0.1.21
Removing sha1 v0.10.5
Adding sha2 v0.10.6
Updating termcolor v1.1.3 -> v1.2.0
Updating tokio v1.23.1 -> v1.24.1
Updating try-lock v0.2.3 -> v0.2.4
Removing windows-sys v0.36.1
Updating windows_aarch64_gnullvm v0.42.0 -> v0.42.1
Removing windows_aarch64_msvc v0.36.1
Removing windows_aarch64_msvc v0.42.0
Adding windows_aarch64_msvc v0.42.1
Removing windows_i686_gnu v0.36.1
Removing windows_i686_gnu v0.42.0
Adding windows_i686_gnu v0.42.1
Removing windows_i686_msvc v0.36.1
Removing windows_i686_msvc v0.42.0
Adding windows_i686_msvc v0.42.1
Removing windows_x86_64_gnu v0.36.1
Removing windows_x86_64_gnu v0.42.0
Adding windows_x86_64_gnu v0.42.1
Updating windows_x86_64_gnullvm v0.42.0 -> v0.42.1
Removing windows_x86_64_msvc v0.36.1
Removing windows_x86_64_msvc v0.42.0
Adding windows_x86_64_msvc v0.42.1
2023-01-15 15:46:33 -06:00
Greg Heartsfield
13cc24b5cd improvement: log blacklisted events 2023-01-15 15:42:27 -06:00
Greg Heartsfield
f543957b34 improvement: clear out hidden events during schema upgrade 2023-01-15 15:27:41 -06:00
Greg Heartsfield
7021f102e8 improvement: delete replaceable events 2023-01-15 15:13:10 -06:00
Greg Heartsfield
fddbf321bc perf: add indexes and force their use (authors) 2023-01-15 10:52:49 -06:00
Greg Heartsfield
3e7f2e21df perf: force authors index to be used if possible 2023-01-15 10:23:46 -06:00
Greg Heartsfield
9d9c6c78d1 improvement: refuse to insert events that would automatically be hidden 2023-01-15 10:01:01 -06:00
Greg Heartsfield
703b2efe6e refactor: replaceable check in event 2023-01-15 09:18:53 -06:00
Greg Heartsfield
0db6487ce3 fix: allow tokio tracing to be enabled
fixes https://github.com/scsibug/nostr-rs-relay/issues/48
2023-01-14 09:47:23 -06:00
Rasmus Schlunsen
ba987d3212 docs: update example nginx configuration to ensure A+ rating
config from https://www.ssllabs.com/ssltest/
2023-01-14 09:33:40 -06:00
Rasmus Schlunsen
73f4f60cc7 improvement: use clap for command line args 2023-01-14 09:22:11 -06:00
Greg Heartsfield
d06d227ebe improvement: lower REQ logging and note possible truncation 2023-01-11 16:56:40 -06:00
Greg Heartsfield
3519488c4e improvement: lower logging for failed REQ parses 2023-01-10 07:41:49 -06:00
Greg Heartsfield
fbd3315110 improvement: log REQ messages at debug level 2023-01-09 22:12:20 -06:00
Greg Heartsfield
3d3d1bde53 refactor: clippy suggestions 2023-01-09 22:12:04 -06:00
Greg Heartsfield
ed336111bb improvement: alert before long-running migration 2023-01-09 22:11:25 -06:00
Greg Heartsfield
8aed572989 docs: add link to relay setup 2023-01-09 21:33:59 -06:00
Greg Heartsfield
62e8da689d fix: do not force kind_created_at_index when there are tags 2023-01-06 12:57:48 -06:00
Greg Heartsfield
807d1aa384 improvement: log index names used 2023-01-06 12:50:52 -06:00
Greg Heartsfield
66a55b55b9 perf: new index, manually selected when appropriate 2023-01-06 12:17:30 -06:00
Greg Heartsfield
76c77c3e56 feat: bulk loading script for importing events 2023-01-06 12:16:19 -06:00
Greg Heartsfield
50daab8a6f refactor: make a standalone re-tagging function 2023-01-06 06:57:56 -06:00
Greg Heartsfield
ffd4e6f997 build: bump version to 0.7.16 2023-01-04 17:28:05 -06:00
Greg Heartsfield
bbd716963e improvement: update multiple dependencies
Updating anyhow v1.0.67 -> v1.0.68
Updating cxx v1.0.84 -> v1.0.85
Updating cxx-build v1.0.84 -> v1.0.85
Updating cxxbridge-flags v1.0.84 -> v1.0.85
Updating cxxbridge-macro v1.0.84 -> v1.0.85
Updating hermit-abi v0.1.19 -> v0.2.6
Updating libc v0.2.138 -> v0.2.139
Updating nom v7.1.1 -> v7.1.2
Updating num_cpus v1.14.0 -> v1.15.0
Updating once_cell v1.16.0 -> v1.17.0
Updating openssl v0.10.44 -> v0.10.45
Updating openssl-sys v0.9.79 -> v0.9.80
Updating pest v2.5.1 -> v2.5.2
Updating pest_derive v2.5.1 -> v2.5.2
Updating pest_generator v2.5.1 -> v2.5.2
Updating pest_meta v2.5.1 -> v2.5.2
Updating proc-macro2 v1.0.48 -> v1.0.49
Updating prost v0.11.3 -> v0.11.5
Updating prost-derive v0.11.2 -> v0.11.5
Updating prost-types v0.11.2 -> v0.11.5
Updating quote v1.0.22 -> v1.0.23
Updating serde v1.0.151 -> v1.0.152
Updating serde_derive v1.0.151 -> v1.0.152
Updating serde_json v1.0.90 -> v1.0.91
Updating syn v1.0.106 -> v1.0.107
Updating tokio v1.23.0 -> v1.23.1
2023-01-04 17:26:22 -06:00
Greg Heartsfield
ca95e8cf22 docs(NIP-26): reflect NIP-26 being disabled in README 2023-01-04 16:54:52 -06:00
Greg Heartsfield
e9d2a2cbd0 perf(NIP-26): temporarily disable NIP-26 delegated events 2023-01-04 16:51:22 -06:00
Greg Heartsfield
39a945b493 perf: separate author/delegated_by queries, minor improvement 2023-01-04 16:51:17 -06:00
Greg Heartsfield
9a84dc19e9 perf: author/kind index added (schema v13) 2023-01-04 16:51:02 -06:00
Greg Heartsfield
20c4bb42eb fix: correct log message 2023-01-03 21:24:46 -06:00
JesterHodl
0e519f6b77 feat: add --help and --version flags
fixes: https://github.com/scsibug/nostr-rs-relay/issues/42
2023-01-03 17:39:21 -06:00
Greg Heartsfield
3dd0f2c9c6 fix: do not run auto_vacuum on read-only connections 2023-01-03 17:32:55 -06:00
Greg Heartsfield
b7c8737166 improvement: enable auto_vacuum on database creation 2023-01-03 06:22:43 -06:00
Greg Heartsfield
c0b112c094 improvement: enable auto_vacuum on connections 2023-01-03 06:22:04 -06:00
Greg Heartsfield
cb283ac316 fix: ensure that replaceable events are handled correctly regardless of order received 2023-01-02 17:18:11 -06:00
Greg Heartsfield
2c6ac69bfd docs: remove incorrect comment 2023-01-02 15:41:17 -06:00
Greg Heartsfield
d929ae2752 improvement: define websocket send queue (unlimited->1024) 2023-01-02 15:39:28 -06:00
Greg Heartsfield
14fe9f9ee1 improvement: remove pauses for backups, likely not needed w/ WAL compaction 2023-01-02 15:38:30 -06:00
0xtr
7774db8c47 feat: add event kind blacklist
Adds a list to the config where you can specify which event kinds to blacklist.
The blacklist check will run right after verifying that the pubkey is allowed
to post events to the relay.
2022-12-27 17:10:34 -06:00
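A hedged sketch of the corresponding config.toml entry; the section and key name are assumptions, and the kind number is an arbitrary example:

    [limits]
    # Reject events with any of these kind numbers outright
    event_kind_blacklist = [70202]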
Greg Heartsfield
104ef2b9e1 build: bump version to 0.7.15 2022-12-27 17:04:48 -06:00
Greg Heartsfield
c06139ec99 docs: start of database maintenance tips 2022-12-27 17:00:14 -06:00
Greg Heartsfield
19ec89593d improvement: drop queries that are running during a checkpoint 2022-12-27 15:24:10 -06:00
Greg Heartsfield
27902bc5f4 improvement: move reader mutex closer to DB connection acquisition 2022-12-27 10:28:56 -06:00
Greg Heartsfield
d2adddaee4 improvement: extend allowed wal_checkpoint timeout to 10 sec 2022-12-27 10:13:14 -06:00
Greg Heartsfield
b23b3ce8ec improvement: block new readers when WAL is large 2022-12-27 09:48:07 -06:00
Greg Heartsfield
5f9fe1ce59 improvement: do not send realtime only filters to the DB (limit:0) 2022-12-26 12:20:36 -06:00
Greg Heartsfield
6a8c4ed1b5 build: bump version to 0.7.14 2022-12-26 11:26:48 -06:00
Greg Heartsfield
966c853700 docs: non-docker quick start 2022-12-26 10:34:09 -06:00
Greg Heartsfield
65fd0ed08b feat: increase wal_checkpoint time when WAL is large 2022-12-26 10:03:51 -06:00
Greg Heartsfield
0b51675b38 improvement: change suggestion and default for max sqlite DB readers 2022-12-25 11:17:08 -06:00
Greg Heartsfield
2e22334631 refactor: formatting 2022-12-25 11:06:30 -06:00
Greg Heartsfield
cb2ac4bf0f improvement: give threads unique names 2022-12-25 10:47:32 -06:00
Greg Heartsfield
38dc7789dc improvement: cleaner slow query logs 2022-12-25 10:47:32 -06:00
Greg Heartsfield
ce0e00ffb3 feat: log reader DB pool stats every minute 2022-12-25 10:47:32 -06:00
Greg Heartsfield
3e4ae4aeec feat: cache prepared statements and trace expanded SQL queries 2022-12-25 10:47:32 -06:00
Greg Heartsfield
c6a8807485 improvement: send error on empty-string prefix author/id searches 2022-12-25 10:47:32 -06:00
Greg Heartsfield
8137b6211c refactor: clippy suggestions 2022-12-24 10:29:47 -06:00
Greg Heartsfield
29effaae23 build: remove pre-commit rustfmt check 2022-12-24 10:29:30 -06:00
Greg Heartsfield
e5074f2e46 feat(NIP-28): replaceable kind 41 channel metadata events 2022-12-24 10:14:43 -06:00
Blake Jakopovic
4fd7643907 feat: change pub(crate) to pub for use as a library 2022-12-23 07:14:58 -06:00
Greg Heartsfield
1e1ec69175 build: remove unnecessary dockerfile mod script 2022-12-23 06:52:09 -06:00
benthecarman
e08647867c refactor: remove code duplication for simple_event 2022-12-23 06:39:50 -06:00
Greg Heartsfield
ae0f7171ed build: remove digest-locked docker base images 2022-12-23 06:30:59 -06:00
Greg Heartsfield
4f1a912f36 feat: log origin header from websocket requests
fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/33
2022-12-22 16:55:53 -06:00
Greg Heartsfield
95748647f0 build: bump version to 0.7.13 2022-12-22 16:27:34 -06:00
Greg Heartsfield
25480e837f fix: do not block writers for more than 1 second during checkpoints 2022-12-22 16:10:49 -06:00
Greg Heartsfield
b80b54cd9d improvement: reduce logging, especially for database pool size 2022-12-22 15:47:33 -06:00
Greg Heartsfield
8ea732cbe5 feat: perform regular database maintenance (60sec), without blocking main writer thread 2022-12-22 15:16:21 -06:00
Greg Heartsfield
0f68c4e5c2 refactor: formatting 2022-12-22 15:15:45 -06:00
Greg Heartsfield
dab2cd5792 wip: future changes to rustfmt 2022-12-22 15:13:54 -06:00
Greg Heartsfield
f411aa6fc2 fix: do not re-verify NIP-05 entries where metadata was deleted 2022-12-22 13:01:48 -06:00
Greg Heartsfield
d31bbda087 improvement: reduce lifetime of database connections 2022-12-22 13:01:12 -06:00
Greg Heartsfield
5917bc53b2 improvement: run maintenance every 60 seconds instead of by event count 2022-12-22 11:40:17 -06:00
Greg Heartsfield
91177c61a1 improvement: log reason for new event creation from nip05 2022-12-22 10:48:30 -06:00
Greg Heartsfield
53c2a8051c improvement: reduce logging 2022-12-22 10:29:27 -06:00
Greg Heartsfield
168cf513ac feat: perform full checkpoints and truncate WAL every 2k events 2022-12-22 10:11:05 -06:00
Greg Heartsfield
ea204761c9 fix: do not show slow queries more than once per sub 2022-12-20 15:41:50 -06:00
Greg Heartsfield
c270ae1434 improvement: reduce event count for db writer pauses 2022-12-20 15:25:24 -06:00
Greg Heartsfield
64bd983cb6 perf: every 5000 persisted events, pause for 500ms for backups
I have observed backups running for a very long time under heavy load;
this introduces some artificial delay to give the online backup enough
time to make progress.
2022-12-20 15:05:04 -06:00
Greg Heartsfield
1c153bc784 perf: shed DB query load when queue gets large 2022-12-20 13:23:21 -06:00
Greg Heartsfield
dc11d9a619 improvement: explicitly rollback transaction on duplicate event 2022-12-20 13:23:04 -06:00
Greg Heartsfield
cd1557787b improvement: log write pool 2022-12-20 13:21:57 -06:00
Greg Heartsfield
86bb7aeb9a improvement: function to check pool capacity 2022-12-20 10:07:01 -06:00
Greg Heartsfield
ce37fc1a2d build: bump version to 0.7.12 2022-12-19 14:50:42 -06:00
Greg Heartsfield
2cfd384339 perf: drop db handles that are not quickly read 2022-12-19 00:18:39 -06:00
Greg Heartsfield
8c013107f9 perf: increase upper bound for sqlite mmap 2022-12-18 23:19:43 -06:00
Greg Heartsfield
64a4466d30 perf: backing down on max_blocking_threads 2022-12-18 23:14:41 -06:00
Greg Heartsfield
1596c23eb4 perf: increase blocking threads now that contention is reduced 2022-12-18 22:46:32 -06:00
Greg Heartsfield
129badd4e1 perf: reduce per thread mmap allocation for DB 2022-12-18 22:45:32 -06:00
Greg Heartsfield
6f7c080180 improvement: reduce number of writer blocking threads from 4->2 2022-12-18 22:32:31 -06:00
Greg Heartsfield
af92561ef6 perf: remove shared cache mode (experiment) 2022-12-18 22:15:50 -06:00
Greg Heartsfield
d833a3e40d perf: reduce logging 2022-12-18 22:11:46 -06:00
Greg Heartsfield
462eb46642 build: bump version to 0.7.11 2022-12-18 20:52:01 -06:00
Greg Heartsfield
cf144d503d perf: reduce logging for slow queries 2022-12-18 20:47:11 -06:00
Greg Heartsfield
fb8375aef2 build: bump version to 0.7.10 2022-12-18 13:46:18 -06:00
Greg Heartsfield
88ac31b549 perf: increase channel size for DB communication 2022-12-18 13:44:28 -06:00
Greg Heartsfield
677b7d39e9 improvement: log slow requests that return zero results 2022-12-18 13:42:31 -06:00
Greg Heartsfield
b24d2f9aaa perf: set default blocking threads to lower value 2022-12-18 12:20:57 -06:00
Greg Heartsfield
7a3899d852 build: bump version to 0.7.9 2022-12-18 09:21:07 -06:00
Greg Heartsfield
818108b793 improvement: upgrade multiple dependencies
Updating anyhow v1.0.66 -> v1.0.67
Updating async-trait v0.1.59 -> v0.1.60
Updating cxx v1.0.83 -> v1.0.84
Updating cxx-build v1.0.83 -> v1.0.84
Updating cxxbridge-flags v1.0.83 -> v1.0.84
Updating cxxbridge-macro v1.0.83 -> v1.0.84
Updating itoa v1.0.4 -> v1.0.5
Updating link-cplusplus v1.0.7 -> v1.0.8
Updating proc-macro2 v1.0.47 -> v1.0.48
Updating quote v1.0.21 -> v1.0.22
Updating rustversion v1.0.9 -> v1.0.11
Updating ryu v1.0.11 -> v1.0.12
Updating scratch v1.0.2 -> v1.0.3
Updating serde v1.0.150 -> v1.0.151
Updating serde_derive v1.0.150 -> v1.0.151
Updating serde_json v1.0.89 -> v1.0.90
Updating syn v1.0.105 -> v1.0.106
Updating thiserror v1.0.37 -> v1.0.38
Updating thiserror-impl v1.0.37 -> v1.0.38
Updating unicode-ident v1.0.5 -> v1.0.6
2022-12-18 09:16:09 -06:00
Greg Heartsfield
d10348f7e1 feat: configurable blocking threads 2022-12-18 09:14:04 -06:00
Greg Heartsfield
8598e443d8 wip: add configuration for future feature (client concurrent db limits) 2022-12-17 23:19:48 -06:00
Greg Heartsfield
43222d44e5 feat: perform optimization after seeing many events 2022-12-17 23:18:54 -06:00
Greg Heartsfield
7c1516c4fb perf: add index for tags 2022-12-17 23:17:53 -06:00
Greg Heartsfield
0c72053a49 perf: increase mmap size to 1GB 2022-12-17 23:17:16 -06:00
Greg Heartsfield
3f32ff67ab improvement: minor logging 2022-12-17 23:11:14 -06:00
Greg Heartsfield
0b9778d6ca refactor: simplify tracking of subscriptions 2022-12-17 20:46:58 -06:00
Greg Heartsfield
9be04120c7 build: bump version to 0.7.8 2022-12-17 12:01:43 -06:00
Greg Heartsfield
cc06167e06 perf: add composite index for tag table 2022-12-17 12:01:20 -06:00
Greg Heartsfield
b6e33f044f improvement: limit db connection max lifetime 2022-12-17 10:47:35 -06:00
Greg Heartsfield
1b2c6f9fca build: bump version to 0.7.7 2022-12-17 10:09:44 -06:00
Greg Heartsfield
0d8d39ad22 feat: add rate limiting setting for subscription creation 2022-12-17 09:27:29 -06:00
Greg Heartsfield
0e851d4f71 build: bump version to 0.7.6 2022-12-17 07:51:57 -06:00
Greg Heartsfield
3c880b2f49 perf: pull distinct to outermost SQL 2022-12-17 07:49:28 -06:00
Greg Heartsfield
7a4c9266ec improvement: make hexsearch structs sortable 2022-12-17 07:49:05 -06:00
Greg Heartsfield
e8557d421b build: bump version to 0.7.5 2022-12-16 17:21:00 -06:00
Greg Heartsfield
7ca9c864f2 improvement: DB pool logging shows used connections directly 2022-12-16 17:01:49 -06:00
Greg Heartsfield
838aafd079 improvement: consistent log messages for client/sub ids 2022-12-16 15:22:27 -06:00
Greg Heartsfield
e554b10ac2 improvement: tweak sub/sql logging for slow queries 2022-12-16 14:55:45 -06:00
Greg Heartsfield
b0bfaa48fc improvement: ignore duplicate REQ messages 2022-12-16 14:37:02 -06:00
Greg Heartsfield
2e9b1b6ba7 docs: comment reason for force_no_match 2022-12-16 14:35:21 -06:00
Greg Heartsfield
4d9012d94c improvement: upgrade docker builder and base images 2022-12-16 14:33:08 -06:00
Greg Heartsfield
ffe7aac066 improvement: upgrade multiple dependencies
Updating aho-corasick v0.7.19 -> v0.7.20
Updating async-trait v0.1.58 -> v0.1.59
Updating axum v0.5.17 -> v0.6.1
Updating axum-core v0.2.9 -> v0.3.0
Updating bytes v1.2.1 -> v1.3.0
Updating cc v1.0.76 -> v1.0.78
Updating crossbeam-utils v0.8.12 -> v0.8.14
Updating cxx v1.0.82 -> v1.0.83
Updating cxx-build v1.0.82 -> v1.0.83
Updating cxxbridge-flags v1.0.82 -> v1.0.83
Updating cxxbridge-macro v1.0.82 -> v1.0.83
Updating flate2 v1.0.24 -> v1.0.25
Updating libc v0.2.137 -> v0.2.138
Updating matchit v0.5.0 -> v0.7.0
Updating miniz_oxide v0.5.4 -> v0.6.2
Updating openssl v0.10.42 -> v0.10.44
Updating openssl-sys v0.9.77 -> v0.9.79
Updating parking_lot_core v0.9.4 -> v0.9.5
Updating pest v2.4.1 -> v2.5.1
Updating pest_derive v2.4.1 -> v2.5.1
Updating pest_generator v2.4.1 -> v2.5.1
Updating pest_meta v2.4.1 -> v2.5.1
Updating prost v0.11.2 -> v0.11.3
Adding rustversion v1.0.9
Updating serde v1.0.147 -> v1.0.150
Updating serde_derive v1.0.147 -> v1.0.150
Updating serde_json v1.0.88 -> v1.0.89
Updating sha-1 v0.10.0 -> v0.10.1
Updating syn v1.0.103 -> v1.0.105
Updating tokio v1.22.0 -> v1.23.0
Updating tokio-macros v1.8.0 -> v1.8.2
Updating toml v0.5.9 -> v0.5.10
Updating tonic v0.8.2 -> v0.8.3
Updating tower-http v0.3.4 -> v0.3.5
Updating typenum v1.15.0 -> v1.16.0
2022-12-16 11:17:05 -06:00
Greg Heartsfield
f9695bd0a9 fix: db schema version updates correctly for v9 2022-12-16 10:01:49 -06:00
Greg Heartsfield
7c4bf5cc8f fix: run db migration for v9 2022-12-16 08:21:00 -06:00
Greg Heartsfield
e2de162931 feat: only show SQL in logs for slow queries unless tracing 2022-12-16 08:17:39 -06:00
Greg Heartsfield
4f606615eb perf: indexing improvement 2022-12-16 08:16:49 -06:00
Greg Heartsfield
84a58ebbcd build: bump version to 0.7.3 2022-12-16 06:32:00 -06:00
Greg Heartsfield
c48e45686d perf: schema updates for better event indexing 2022-12-15 08:48:35 -06:00
Greg Heartsfield
bbe359364a refactor: clippy warnings 2022-12-15 08:43:36 -06:00
Greg Heartsfield
9e9c494367 perf: significant query speedup when using kinds.
fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/51
2022-12-14 21:04:49 -06:00
Greg Heartsfield
5fa24bc9f1 fix: send EOSE when ids list is empty in subscriptions
Fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/50
2022-11-19 10:35:00 -06:00
Greg Heartsfield
4de7490d97 fix: send EOSE when authors list is empty in subscriptions
Fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/49
2022-11-19 10:00:38 -06:00
Greg Heartsfield
d0f63dc66e docs: update container instructions for rootless podman 2022-11-19 09:32:26 -06:00
Greg Heartsfield
06078648c8 build: bump version to 0.7.2 2022-11-19 07:55:52 -06:00
Greg Heartsfield
cc0fcc5d66 docs: add Cargo package metadata 2022-11-19 07:32:17 -06:00
Greg Heartsfield
dfb2096653 improvement: build auditable binary in docker 2022-11-19 07:11:39 -06:00
Greg Heartsfield
486508d192 improvement: upgrade multiple dependencies
Updating crates.io index
Updating cc v1.0.74 -> v1.0.76
Updating chrono v0.4.22 -> v0.4.23
Updating cxx v1.0.80 -> v1.0.82
Updating cxx-build v1.0.80 -> v1.0.82
Updating cxxbridge-flags v1.0.80 -> v1.0.82
Updating cxxbridge-macro v1.0.80 -> v1.0.82
Updating digest v0.10.5 -> v0.10.6
Updating hyper v0.14.22 -> v0.14.23
Updating indexmap v1.9.1 -> v1.9.2
Updating regex v1.6.0 -> v1.7.0
Updating regex-syntax v0.6.27 -> v0.6.28
Updating serde_json v1.0.87 -> v1.0.88
Updating tokio v1.21.2 -> v1.22.0
Updating uuid v1.2.1 -> v1.2.2
2022-11-19 06:52:06 -06:00
Greg Heartsfield
84b43c144b improvement: use locked cargo packages to build container images 2022-11-19 06:29:13 -06:00
Greg Heartsfield
110500bb46 feat(NIP-20): advertise support for NIP-20 in relay info/readme 2022-11-12 09:22:43 -06:00
Greg Heartsfield
83f6b11de7 refactor: clippy fix 2022-11-12 09:22:24 -06:00
William Casarin
6d1244434b feat(NIP-20): improve invalid event error messages
Instead of returning a NOTICE for invalid events, return an `OK false`
command result with a reason as to why the event is invalid.
2022-11-12 09:13:22 -06:00
William Casarin
5a91419d34 feat(NIP-20): send command results to clients
When submitting events to relays, clients currently have no way to know
if an event was successfully committed to the database. This NIP
introduces the concept of command results which are like NOTICE's except
provide more information about if an event was accepted or rejected.

A command result is a JSON object with the following structure that is
returned when an event is successfully saved to the database or
rejected:

	["OK", <event_id>, <true|false>, <message>]

nip20: https://github.com/nostr-protocol/nips/pull/62
2022-11-12 09:12:35 -06:00
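For illustration, one accepted and one rejected command result under this scheme (the event id is a placeholder, and the reason string is free-form):

    ["OK", "<event_id>", true, ""]
    ["OK", "<event_id>", false, "duplicate: already have this event"]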
William Casarin
7adc5c9af7 perf: dont create intermediate vecs when matching subs
Avoid creating intermediate vectors when matching subscriptions. We can
just iterate over the hashmap directly.
2022-11-09 07:30:43 -06:00
Greg Heartsfield
9dd4571bee refactor: reduce level of some common DB logs 2022-11-06 13:49:32 -06:00
Greg Heartsfield
9db5a26b9c refactor: more consistent logging messages 2022-11-05 16:11:20 -05:00
Greg Heartsfield
ac345b5744 refactor: do not quote server-generated client id in logs 2022-11-05 15:59:39 -05:00
Greg Heartsfield
675662c7fb improvement: upgrade docker builder and base images 2022-11-05 13:24:17 -05:00
Greg Heartsfield
505b0cb71f improvement: upgrade multiple dependencies
Updating anyhow v1.0.65 -> v1.0.66
Updating async-trait v0.1.57 -> v0.1.58
Updating axum v0.5.16 -> v0.5.17
Updating axum-core v0.2.8 -> v0.2.9
Updating base64 v0.13.0 -> v0.13.1
Updating bumpalo v3.11.0 -> v3.11.1
Updating cc v1.0.73 -> v1.0.74
Updating cxx v1.0.79 -> v1.0.80
Updating cxx-build v1.0.79 -> v1.0.80
Updating cxxbridge-flags v1.0.79 -> v1.0.80
Updating cxxbridge-macro v1.0.79 -> v1.0.80
Updating futures v0.3.24 -> v0.3.25
Updating futures-channel v0.3.24 -> v0.3.25
Updating futures-core v0.3.24 -> v0.3.25
Updating futures-executor v0.3.24 -> v0.3.25
Updating futures-io v0.3.24 -> v0.3.25
Updating futures-macro v0.3.24 -> v0.3.25
Updating futures-sink v0.3.24 -> v0.3.25
Updating futures-task v0.3.24 -> v0.3.25
Updating futures-util v0.3.24 -> v0.3.25
Updating getrandom v0.2.7 -> v0.2.8
Updating h2 v0.3.14 -> v0.3.15
Updating hyper v0.14.20 -> v0.14.22
Updating iana-time-zone v0.1.51 -> v0.1.53
Updating libc v0.2.135 -> v0.2.137
Updating mio v0.8.4 -> v0.8.5
Updating native-tls v0.2.10 -> v0.2.11
Updating num_cpus v1.13.1 -> v1.14.0
Updating once_cell v1.15.0 -> v1.16.0
Updating openssl-sys v0.9.76 -> v0.9.77
Updating parking_lot_core v0.9.3 -> v0.9.4
Updating pest v2.4.0 -> v2.4.1
Updating pest_derive v2.4.0 -> v2.4.1
Updating pest_generator v2.4.0 -> v2.4.1
Updating pest_meta v2.4.0 -> v2.4.1
Updating pkg-config v0.3.25 -> v0.3.26
Updating ppv-lite86 v0.2.16 -> v0.2.17
Updating prost v0.11.0 -> v0.11.2
Updating prost-derive v0.11.0 -> v0.11.2
Updating prost-types v0.11.1 -> v0.11.2
Updating serde v1.0.145 -> v1.0.147
Updating serde_derive v1.0.145 -> v1.0.147
Updating serde_json v1.0.86 -> v1.0.87
Updating syn v1.0.102 -> v1.0.103
  Adding windows-sys v0.42.0
  Adding windows_aarch64_gnullvm v0.42.0
  Adding windows_aarch64_msvc v0.42.0
  Adding windows_i686_gnu v0.42.0
  Adding windows_i686_msvc v0.42.0
  Adding windows_x86_64_gnu v0.42.0
  Adding windows_x86_64_gnullvm v0.42.0
  Adding windows_x86_64_msvc v0.42.0
2022-11-05 10:59:03 -05:00
Greg Heartsfield
e8aa450802 build: bump version to 0.7.1 2022-11-05 10:35:38 -05:00
Greg Heartsfield
5a8860bb09 feat: log user-agent if present 2022-11-05 10:29:25 -05:00
Greg Heartsfield
11e43eccf9 refactor: add unit to ping_interval config 2022-11-05 07:42:08 -05:00
William Casarin
50577b2dfa feat: add network.ping_interval setting
Add a ping interval setting that allows you to customize the websocket
ping interval. The default of 5 minutes may be too high for some proxy
servers that disconnect connections that are held open for too long.
2022-11-05 07:40:28 -05:00
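A minimal config.toml sketch, assuming the interval is given in seconds (the follow-up refactor above adds an explicit unit to the setting):

    [network]
    # Websocket ping interval; the default of 300 seconds (5 minutes) may be
    # too long for some proxies
    ping_interval = 45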
William Casarin
a6cb6f8486 refactor: rename get_header_remote_ip -> get_header_string
This function has nothing to do with remote ips!
2022-11-05 07:37:18 -05:00
Greg Heartsfield
ae5bf98d87 feat: retrieve client IP from header in config.toml
If the config.toml has defined a HTTP header to look for a remote IP,
that will be logged.  Otherwise, the socket address IP will be used.

closes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/47
2022-11-04 18:05:01 -05:00
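A sketch of the corresponding config.toml setting; the key name is an assumption based on the commit description:

    [network]
    # Read the client IP from this header (set by a reverse proxy)
    # rather than from the socket address
    remote_ip_header = "x-forwarded-for"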
William Casarin
1cf9d719f0 feat: look for proxied ip headers
This enables support for using the proxied IP from cloudflare. The damus
relay is behind cloudflare, so to get accurate remote ip logging we need
to look at the headers instead of the socket address.

Signed-off-by: William Casarin <jb55@jb55.com>
2022-11-04 17:09:28 -05:00
William Casarin
311f4b5283 refactor: switch new connections to debug log
These are pretty spammy on busy relays. I've been using the info log to
monitor spam attacks, and these are the least useful info logs.

Leave the "stopping connection" log because it at least provides useful
sent/received information.

Signed-off-by: William Casarin <jb55@jb55.com>
2022-11-04 07:59:53 -05:00
Greg Heartsfield
14b5a51e3a fix: log ephemeral events after send 2022-11-04 07:55:38 -05:00
Greg Heartsfield
8ecce3f566 feat: show client IP in logs 2022-11-02 18:33:44 -05:00
Greg Heartsfield
caffbbbede build: bump version to 0.7.0 2022-10-16 15:42:11 -05:00
Greg Heartsfield
81045ad3d0 improvement: upgrade multiple dependencies
Updating anyhow v1.0.64 -> v1.0.65
  Adding codespan-reporting v0.11.1
Updating const_format v0.2.28 -> v0.2.30
Updating const_format_proc_macros v0.2.22 -> v0.2.29
Updating crossbeam-utils v0.8.11 -> v0.8.12
  Adding cxx v1.0.79
  Adding cxx-build v1.0.79
  Adding cxxbridge-flags v1.0.79
  Adding cxxbridge-macro v1.0.79
Updating digest v0.10.3 -> v0.10.5
Updating hdrhistogram v7.5.1 -> v7.5.2
Updating iana-time-zone v0.1.50 -> v0.1.51
  Adding iana-time-zone-haiku v0.1.1
Updating itertools v0.10.3 -> v0.10.5
Updating itoa v1.0.3 -> v1.0.4
Updating js-sys v0.3.59 -> v0.3.60
Updating libc v0.2.132 -> v0.2.135
  Adding link-cplusplus v1.0.7
Updating lock_api v0.4.8 -> v0.4.9
Updating once_cell v1.14.0 -> v1.15.0
Updating openssl v0.10.41 -> v0.10.42
Updating openssl-sys v0.9.75 -> v0.9.76
Updating pest v2.3.0 -> v2.4.0
Updating pest_derive v2.3.0 -> v2.4.0
Updating pest_generator v2.3.0 -> v2.4.0
Updating pest_meta v2.3.0 -> v2.4.0
Updating proc-macro2 v1.0.43 -> v1.0.47
Updating rand_core v0.6.3 -> v0.6.4
Updating raw-cpuid v10.5.0 -> v10.6.0
  Adding scratch v1.0.2
Updating serde v1.0.144 -> v1.0.145
Updating serde_derive v1.0.144 -> v1.0.145
Updating serde_json v1.0.85 -> v1.0.86
  Adding sha1 v0.10.5
Updating smallvec v1.9.0 -> v1.10.0
Updating syn v1.0.99 -> v1.0.102
  Adding termcolor v1.1.3
Updating thiserror v1.0.34 -> v1.0.37
Updating thiserror-impl v1.0.34 -> v1.0.37
Updating tokio v1.21.0 -> v1.21.2
Updating tokio-stream v0.1.9 -> v0.1.11
Updating tonic v0.8.1 -> v0.8.2
Updating tower-layer v0.3.1 -> v0.3.2
Updating tracing v0.1.36 -> v0.1.37
Updating tracing-attributes v0.1.22 -> v0.1.23
Updating tracing-core v0.1.29 -> v0.1.30
Updating tracing-subscriber v0.3.15 -> v0.3.16
Updating unicode-ident v1.0.3 -> v1.0.5
Updating unicode-normalization v0.1.21 -> v0.1.22
  Adding unicode-width v0.1.10
Updating uuid v1.1.2 -> v1.2.1
Updating wasm-bindgen v0.2.82 -> v0.2.83
Updating wasm-bindgen-backend v0.2.82 -> v0.2.83
Updating wasm-bindgen-macro v0.2.82 -> v0.2.83
Updating wasm-bindgen-macro-support v0.2.82 -> v0.2.83
Updating wasm-bindgen-shared v0.2.82 -> v0.2.83
Updating web-sys v0.3.59 -> v0.3.60
  Adding winapi-util v0.1.5
2022-10-16 15:33:11 -05:00
Greg Heartsfield
72f8a1aa5c feat(NIP-26): allow searches for delegated public keys
Implements core NIP-26 delegated event functionality.  Events can
include a `delegation` tag that provides a signature and restrictions
on which events can be delegated.

Notable points on the implementation so far:

* Schema has been upgraded to include an index and new column.
* Basic rune parsing/evaluation to implement the example event in the
  NIP, but no more.
* No special logic for deletion.
* No migration logic for determining delegated authors for
  already-stored events.
2022-10-16 15:25:06 -05:00
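Roughly, a delegated event carries a tag shaped like the following (all values are illustrative placeholders; the conditions string uses the NIP-26 query-string style):

    ["delegation", "<delegator pubkey>", "kind=1&created_at<1675721813", "<signature over the delegation token>"]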
Greg Heartsfield
274c61bb72 improvement: upgrade docker images for base & builder 2022-10-13 18:42:55 -05:00
Greg Heartsfield
6eeefbcc4c feat: quick script for making non-x86 Dockerfiles 2022-10-13 18:35:33 -05:00
Greg Heartsfield
3e8adf978f refactor: move db migrations into isolated functions 2022-10-09 08:54:03 -05:00
Greg Heartsfield
2af5f9fbe8 fix: correct schema upgrade logic (and refactor)
Schema upgrades were buggy from 4->5 (the v5 upgrade would be skipped).  This
change also refactors the logic slightly so that future additions can
be clearer (no need to have if and else-if combinations).
2022-10-09 08:24:01 -05:00
Greg Heartsfield
2739e49362 fix: correct future schema version detection 2022-10-08 13:15:48 -05:00
Greg Heartsfield
f9693f7ac3 fix(NIP-9): hide events received after their deletions
fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/40
2022-10-08 12:12:41 -05:00
Greg Heartsfield
8a63d88b0b fix: prevent deletion of deletion events 2022-10-08 08:02:16 -05:00
Greg Heartsfield
a4df9445b6 test: improve port selection 2022-09-28 07:55:06 -05:00
Greg Heartsfield
92da9d71f8 feat: handle and log TERM signals 2022-09-28 07:20:31 -05:00
Greg Heartsfield
6633f8b472 feat: replace logging with tracing 2022-09-28 07:19:59 -05:00
Greg Heartsfield
93dfed0a87 refactor: misc clippy suggestions 2022-09-24 19:28:02 -05:00
Greg Heartsfield
bef7ca7e27 refactor: misc clippy suggestions 2022-09-24 09:19:16 -05:00
Greg Heartsfield
a98708ba47 refactor: misc clippy suggestions 2022-09-24 09:01:09 -05:00
Greg Heartsfield
ccf9b8d47b refactor: remove unnecessary return types 2022-09-24 08:39:41 -05:00
Greg Heartsfield
8fa58de49a refactor: clippy suggestions 2022-09-24 08:30:22 -05:00
Greg Heartsfield
480c5e4e58 docs: un-link NIP-22 note 2022-09-19 19:34:11 -05:00
dzdidi
5bd00f9107 docs: add refs for nostr-protocol organization
Signed-off-by: dzdidi <deniszalessky@gmail.com>
2022-09-19 19:26:36 -05:00
Greg Heartsfield
36b9f628c7 test: check for relay health after startup 2022-09-17 16:02:57 -05:00
Greg Heartsfield
baeb77af99 test: dynamically find open port for test relay 2022-09-17 14:36:05 -05:00
Greg Heartsfield
29b1e8ce58 refactor: move common test code into module 2022-09-17 12:37:49 -05:00
Greg Heartsfield
786a354776 test: simple integration test to start and stop relay 2022-09-11 12:54:24 -05:00
Greg Heartsfield
4fa8616c73 feat: enable use of tokio-console with diagnostics.tracing setting
View real-time tokio diagnostics by setting the configuration option
"diagnostics.tracing" to true.
2022-09-11 12:44:45 -05:00
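A sketch of enabling this in config.toml (section name assumed from the setting path); note the tokio_unstable rustflags in the .cargo/config.toml shown later in this diff, which tokio-console support requires at build time:

    [diagnostics]
    # Stream runtime diagnostics to tokio-console
    tracing = true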
Greg Heartsfield
74802522c2 improvement: do not create NIP-05 thread if feature is disabled 2022-09-11 11:01:36 -05:00
Greg Heartsfield
9ce5057af8 improvement: better log formatting 2022-09-11 10:22:01 -05:00
Greg Heartsfield
217429f538 build: add release flags, save artifacts 2022-09-11 10:21:29 -05:00
Greg Heartsfield
62a9548c27 docs: show build status for master branch only 2022-09-10 22:53:41 -05:00
Greg Heartsfield
c24dce8177 docs: add build status indicator 2022-09-10 22:48:23 -05:00
Greg Heartsfield
3503cf05ed build: add sr.ht build manifest 2022-09-10 22:43:56 -05:00
Greg Heartsfield
8738e5baa9 improvement: upgrade multiple dependencies
Updating aho-corasick v0.7.18 -> v0.7.19
Updating block-buffer v0.10.2 -> v0.10.3
Updating cpufeatures v0.2.4 -> v0.2.5
Updating form_urlencoded v1.0.1 -> v1.1.0
Updating idna v0.2.3 -> v0.3.0
Removing matches v0.1.9
Updating percent-encoding v2.1.0 -> v2.2.0
Updating thiserror v1.0.33 -> v1.0.34
Updating thiserror-impl v1.0.33 -> v1.0.34
Updating tokio-util v0.7.3 -> v0.7.4
Updating ucd-trie v0.1.4 -> v0.1.5
Updating url v2.2.2 -> v2.3.1
2022-09-10 22:42:52 -05:00
Greg Heartsfield
78da92ccca feat: advertise support for NIP-09 and NIP-12 in relay info
NIP-01 prefix search and NIP-12 generic tags are no longer marked as
experimental.

NIP-11 relay info advertises NIP-09 event deletion and NIP-12 generic
tag search support.
2022-09-10 20:45:09 -05:00
Greg Heartsfield
72f1c19b21 feat(NIP-22): advertise support for event created_at limits
The `reject_future_limits` option can now be disabled, and it is
disabled by default.

NIP-11 advertises support for created_at limits.

The message for future-dated events has been modified, to be closer to
the recommended example in the NIP.
2022-09-10 20:40:10 -05:00
Greg Heartsfield
283967f8cc docs: reference NIP-28 channel 2022-09-10 19:45:23 -05:00
Greg Heartsfield
08b011ad07 feat: ensure that WAL is truncated after checkpoint 2022-09-10 19:18:57 -05:00
Greg Heartsfield
2b03f11e5e refactor: remove global/singleton settings object 2022-09-06 06:12:07 -05:00
Greg Heartsfield
e48bae10e6 feat: support in-memory SQLite database 2022-09-06 06:06:01 -05:00
Greg Heartsfield
8774416b92 refactor: move nostr server into library 2022-09-06 05:56:04 -05:00
Greg Heartsfield
59933ce25e build: add pre-commit config 2022-09-06 05:44:22 -05:00
Greg Heartsfield
1b9f364e15 chore: rustfmt 2022-09-02 12:38:31 -05:00
Greg Heartsfield
4d983dd1e0 improvement: upgrade uuid dependency 2022-09-02 12:37:11 -05:00
Greg Heartsfield
11c33582ef improvement: remove useless carats from Cargo.toml deps 2022-09-02 12:35:02 -05:00
Greg Heartsfield
a754477a02 improvement: misc refactorings (clippy) 2022-09-02 12:26:00 -05:00
Greg Heartsfield
a843eaa939 improvement: db.rs from clippy 2022-09-02 10:30:51 -05:00
Greg Heartsfield
03a130b0b8 improvement: simplify config builder (clippy) 2022-09-02 10:18:16 -05:00
Greg Heartsfield
9124f4540a improvement: upgrade multiple dependencies
Updating cpufeatures v0.2.3 -> v0.2.4
Updating dashmap v5.3.4 -> v5.4.0
Updating futures v0.3.23 -> v0.3.24
Updating futures-channel v0.3.23 -> v0.3.24
Updating futures-core v0.3.23 -> v0.3.24
Updating futures-executor v0.3.23 -> v0.3.24
Updating futures-io v0.3.23 -> v0.3.24
Updating futures-macro v0.3.23 -> v0.3.24
Updating futures-sink v0.3.23 -> v0.3.24
Updating futures-task v0.3.23 -> v0.3.24
Updating futures-util v0.3.23 -> v0.3.24
Updating httparse v1.7.1 -> v1.8.0
Updating lock_api v0.4.7 -> v0.4.8
Updating once_cell v1.13.1 -> v1.14.0
Updating pest v2.2.1 -> v2.3.0
Updating pest_derive v2.2.1 -> v2.3.0
Updating pest_generator v2.2.1 -> v2.3.0
Updating pest_meta v2.2.1 -> v2.3.0
Updating serde v1.0.143 -> v1.0.144
Updating serde_derive v1.0.143 -> v1.0.144
Updating serde_json v1.0.83 -> v1.0.85
Updating socket2 v0.4.4 -> v0.4.7
Updating thiserror v1.0.32 -> v1.0.33
Updating thiserror-impl v1.0.32 -> v1.0.33
Updating tokio v1.20.1 -> v1.21.0
2022-09-02 10:08:14 -05:00
slaninas
77892b2064 fix: syntax error 2022-08-22 05:12:52 -07:00
Greg Heartsfield
4fe6191aa3 chore: formatting 2022-08-21 09:51:34 -07:00
Greg Heartsfield
79a982e3ef improvement: send NOTICE for too-large messages 2022-08-21 09:28:31 -07:00
Greg Heartsfield
01d81db617 improvement: log client id for subscription removal 2022-08-21 09:11:38 -07:00
Greg Heartsfield
e6fef37d4e chore: rustfmt 2022-08-21 09:10:19 -07:00
plantimals
4bbfd77fc1 docs: add NGINX configuration example
resolves https://github.com/scsibug/nostr-rs-relay/issues/12
2022-08-20 09:35:01 -07:00
Greg Heartsfield
8da6f6555a build: bump version to 0.6.2 2022-08-18 17:52:16 -07:00
Greg Heartsfield
5bcc63bd56 improvement: upgrade multiple dependencies
Updating async-trait v0.1.56 -> v0.1.57
Removing block-buffer v0.7.3
Removing block-padding v0.1.5
Updating bumpalo v3.10.0 -> v3.11.0
Removing byte-tools v0.3.1
Updating bytes v1.1.0 -> v1.2.1
Updating cpufeatures v0.2.2 -> v0.2.3
Updating crossbeam-utils v0.8.10 -> v0.8.11
Updating crypto-common v0.1.4 -> v0.1.6
Removing digest v0.8.1
Removing fake-simd v0.1.2
Updating fastrand v1.7.0 -> v1.8.0
Updating futures v0.3.21 -> v0.3.23
Updating futures-channel v0.3.21 -> v0.3.23
Updating futures-core v0.3.21 -> v0.3.23
Updating futures-executor v0.3.21 -> v0.3.23
Updating futures-io v0.3.21 -> v0.3.23
Updating futures-macro v0.3.21 -> v0.3.23
Updating futures-sink v0.3.21 -> v0.3.23
Updating futures-task v0.3.21 -> v0.3.23
Updating futures-util v0.3.21 -> v0.3.23
Removing generic-array v0.12.4
Removing generic-array v0.14.5
  Adding generic-array v0.14.6
Updating h2 v0.3.13 -> v0.3.14
Updating hashbrown v0.12.1 -> v0.12.3
Updating hyper v0.14.19 -> v0.14.20
Updating itoa v1.0.2 -> v1.0.3
Updating js-sys v0.3.58 -> v0.3.59
Updating libc v0.2.126 -> v0.2.132
Removing maplit v1.0.2
Updating once_cell v1.12.1 -> v1.13.1
Removing opaque-debug v0.2.3
Updating openssl v0.10.40 -> v0.10.41
Updating openssl-sys v0.9.74 -> v0.9.75
Updating pest v2.1.3 -> v2.2.1
Updating pest_derive v2.1.0 -> v2.2.1
Updating pest_generator v2.1.3 -> v2.2.1
Updating pest_meta v2.1.3 -> v2.2.1
Updating proc-macro2 v1.0.40 -> v1.0.43
Updating quote v1.0.20 -> v1.0.21
Updating raw-cpuid v10.3.0 -> v10.5.0
Updating redox_syscall v0.2.13 -> v0.2.16
Updating regex v1.5.6 -> v1.6.0
Updating regex-syntax v0.6.26 -> v0.6.27
Updating ryu v1.0.10 -> v1.0.11
Updating security-framework v2.6.1 -> v2.7.0
Updating serde v1.0.138 -> v1.0.143
Updating serde_derive v1.0.138 -> v1.0.143
Updating serde_json v1.0.82 -> v1.0.83
Removing sha-1 v0.8.2
Updating slab v0.4.6 -> v0.4.7
Updating syn v1.0.98 -> v1.0.99
Updating thiserror v1.0.31 -> v1.0.32
Updating thiserror-impl v1.0.31 -> v1.0.32
Updating tokio v1.19.2 -> v1.20.1
Updating tokio-tungstenite v0.17.1 -> v0.17.2
Updating tracing v0.1.35 -> v0.1.36
Updating tracing-core v0.1.28 -> v0.1.29
Updating tungstenite v0.17.2 -> v0.17.3
Updating ucd-trie v0.1.3 -> v0.1.4
Updating unicode-ident v1.0.1 -> v1.0.3
Updating wasm-bindgen v0.2.81 -> v0.2.82
Updating wasm-bindgen-backend v0.2.81 -> v0.2.82
Updating wasm-bindgen-macro v0.2.81 -> v0.2.82
Updating wasm-bindgen-macro-support v0.2.81 -> v0.2.82
Updating wasm-bindgen-shared v0.2.81 -> v0.2.82
Updating web-sys v0.3.58 -> v0.3.59
2022-08-18 17:21:53 -07:00
Greg Heartsfield
035cf34673 fix(NIP-12): correctly search for mixed-case hex-like tags
Only lowercase and even-length tag values are stored as binary BLOBs.
Previously there was an error which prevented search results from being returned
if the tag value was mixed-case and could be interpreted as hex.

A new database migration has been created to repair the `tag` table
for existing relays.

fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/37
2022-08-17 16:34:11 -07:00
Greg Heartsfield
be8170342e fix(NIP-12): multi-tag searches returns correct results
Logic of generated SQL was incorrect, causing multiple tag searches
(as defined in NIP-12) to produce no results.

fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/36
2022-08-11 22:16:10 -07:00
Greg Heartsfield
0a3b15f41f fix(NIP-11): Add CORS header and content type for main page 2022-08-11 19:33:17 -07:00
Kirill Kovalenko
2b4b17dbda fix: windows compilation with bundled sqlite3
Using 'bundled' is recommended by
https://github.com/rusqlite/rusqlite#usage to avoid common build
issues
2022-08-07 10:35:36 -05:00
Greg Heartsfield
5058d98ad6 fix(NIP-12): only allow single-char tag filters 2022-08-07 10:15:36 -05:00
Greg Heartsfield
f4ecd43708 build: bump version to 0.6.1 2022-07-04 17:41:16 -05:00
Greg Heartsfield
a8f465fdc8 improvement: upgrade docker base images (and specify explicit repository) 2022-07-04 17:35:17 -05:00
Greg Heartsfield
1c14adc766 fix(NIP-01): allow limits on a per-filter basis
The original implementation of subscription limit applied to the
entire query, instead of the specific filter.  Now, each filter gets
its own query limit.  When a limit is applied, the most recent N
events will be returned; otherwise the default is to return the
earliest matching events, in order.
2022-07-04 17:25:32 -05:00
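For example, a REQ carrying two filters, each with its own limit (subscription id and filter contents are illustrative):

    ["REQ", "<sub_id>", {"kinds": [1], "limit": 10}, {"kinds": [0], "authors": ["<pubkey>"], "limit": 1}]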
Greg Heartsfield
e894a86566 docs: NIP-15, NIP-16 feature notes in README 2022-07-04 13:10:48 -05:00
Greg Heartsfield
bedc378624 improvement: upgrade multiple dependencies
Updating async-trait v0.1.53 -> v0.1.56
Updating bumpalo v3.9.1 -> v3.10.0
Updating crossbeam-utils v0.8.8 -> v0.8.10
Updating crypto-common v0.1.3 -> v0.1.4
Updating getrandom v0.2.6 -> v0.2.7
Updating http v0.2.7 -> v0.2.8
Updating indexmap v1.8.2 -> v1.9.1
Updating js-sys v0.3.57 -> v0.3.58
Updating linked-hash-map v0.5.4 -> v0.5.6
Updating mio v0.8.3 -> v0.8.4
Updating once_cell v1.12.0 -> v1.12.1
Updating openssl-sys v0.9.73 -> v0.9.74
Removing parking_lot v0.11.2
Removing parking_lot_core v0.8.5
Updating proc-macro2 v1.0.39 -> v1.0.40
Updating quote v1.0.18 -> v1.0.20
Updating r2d2 v0.8.9 -> v0.8.10
Updating ron v0.7.0 -> v0.7.1
Updating serde v1.0.137 -> v1.0.138
Updating serde_derive v1.0.137 -> v1.0.138
Updating serde_json v1.0.81 -> v1.0.82
Updating smallvec v1.8.0 -> v1.9.0
Updating syn v1.0.95 -> v1.0.98
Updating tokio v1.18.2 -> v1.19.2
Updating tokio-macros v1.7.0 -> v1.8.0
Updating tokio-util v0.7.2 -> v0.7.3
Updating tower-service v0.3.1 -> v0.3.2
Updating tracing v0.1.34 -> v0.1.35
Removing tracing-attributes v0.1.21
Updating tracing-core v0.1.26 -> v0.1.28
Updating unicode-ident v1.0.0 -> v1.0.1
Updating unicode-normalization v0.1.19 -> v0.1.21
Updating wasm-bindgen v0.2.80 -> v0.2.81
Updating wasm-bindgen-backend v0.2.80 -> v0.2.81
Updating wasm-bindgen-macro v0.2.80 -> v0.2.81
Updating wasm-bindgen-macro-support v0.2.80 -> v0.2.81
Updating wasm-bindgen-shared v0.2.80 -> v0.2.81
Updating web-sys v0.3.57 -> v0.3.58
2022-07-04 12:56:10 -05:00
Greg Heartsfield
e1c2a6b758 improvement: upgrade docker base image 2022-05-30 21:53:46 -05:00
Greg Heartsfield
990bb656e8 improvement: upgrade multiple dependencies
Cargo updated the following dependencies:

Updating dashmap v5.3.3 -> v5.3.4
Updating http-body v0.4.4 -> v0.4.5
Updating hyper v0.14.18 -> v0.14.19
Updating indexmap v1.8.1 -> v1.8.2
Updating itoa v1.0.1 -> v1.0.2
Updating libc v0.2.125 -> v0.2.126
Updating once_cell v1.10.0 -> v1.12.0
Updating parking_lot v0.12.0 -> v0.12.1
Updating proc-macro2 v1.0.38 -> v1.0.39
Updating regex v1.5.5 -> v1.5.6
Updating regex-syntax v0.6.25 -> v0.6.26
Updating ryu v1.0.9 -> v1.0.10
Updating schannel v0.1.19 -> v0.1.20
Updating scheduled-thread-pool v0.2.5 -> v0.2.6
Updating syn v1.0.93 -> v1.0.95
Updating tokio-util v0.7.1 -> v0.7.2

Adding unicode-ident v1.0.0

Removing unicode-xid v0.2.3
2022-05-30 21:47:24 -05:00
Semisol
168cfc3b26 feat(NIP-16): Implement NIP16
NIP16 introduces a replaceable and ephemeral event range:
[10000..20000) for replaceable and [20000..30000) for
ephemeral.
2022-05-30 21:43:06 -05:00
Semisol
a36ad378f6 feat(NIP-15): Implement NIP15
NIP15 sends an EOSE notice to clients after all stored events are sent
to allow loading indicators and other use cases.
2022-05-30 21:43:00 -05:00
Greg Heartsfield
538d139ebf improvement: upgrade docker base image 2022-05-10 21:24:22 -05:00
Greg Heartsfield
23f7730fea build: bump version to 0.6.0 2022-05-10 21:19:21 -05:00
Greg Heartsfield
8aa1256254 improvement: upgrade multiple dependencies 2022-05-10 17:07:18 -05:00
Greg Heartsfield
9ed3391b46 fix(NIP-09): correct WHERE clause for event deletion 2022-05-10 16:50:52 -05:00
William Casarin
4ad483090e feat(NIP-01): Implement limit
This was quickly sneaked in by fiatjaf per my request[0]; it makes many
queries more efficient and allows for paging when combined with until.

It is a bit weird to have multiple limits on each filter... for now we
just choose any or the last limit seen.

[0]: a4aea5337f

Signed-off-by: William Casarin <jb55@jb55.com>
2022-05-10 16:47:56 -05:00
Greg Heartsfield
9b351aab9b docs: update devel discussion link 2022-02-28 17:19:24 -06:00
Greg Heartsfield
597749890e improvement: remove unnecessary event logging 2022-02-27 19:30:48 -06:00
Greg Heartsfield
1d499cf12b feat: handle NIP-09 for deletion events 2022-02-27 11:35:23 -06:00
Greg Heartsfield
ed3a6b9692 refactor: simplify NOTICE messages 2022-02-26 17:34:58 -06:00
Greg Heartsfield
048199e30b build: bump version to 0.5.2 2022-02-26 11:22:16 -06:00
Greg Heartsfield
414e83f696 refactor: import cleanup for config 2022-02-26 11:16:12 -06:00
Greg Heartsfield
225c8f762e improvement: upgrade dependencies; config, tungstenite, tokio 2022-02-26 09:55:12 -06:00
Greg Heartsfield
887fc28ab2 fix: until filters in subscriptions now used 2022-02-26 09:15:45 -06:00
Greg Heartsfield
294d3b99c3 fix: correct imports for test cases 2022-02-26 09:07:07 -06:00
Greg Heartsfield
53990672ae improvement: move db pool operations closer to query, do not panic on failure 2022-02-23 16:38:16 -06:00
Greg Heartsfield
9c1b21cbfe improvement: more granular perf logging for SQL queries 2022-02-21 09:03:05 -06:00
Greg Heartsfield
2f63417646 improvement: better logging for connection resets 2022-02-21 08:57:07 -06:00
Greg Heartsfield
3b25160852 fix: abort on connection IO errors 2022-02-21 08:50:46 -06:00
Greg Heartsfield
34ad549cde fix: update event buffer size comment in config 2022-02-20 11:46:24 -06:00
Greg Heartsfield
f8b1fe5035 docs: line up comments with code 2022-02-17 16:18:05 -06:00
Greg Heartsfield
f2001dc34a build: bump version to 0.5.1 2022-02-13 09:38:45 -06:00
Greg Heartsfield
b593001229 fix: remove setting from example config 2022-02-13 09:37:05 -06:00
Greg Heartsfield
5913b9f87a feat: send notices when authorization checks fail 2022-02-13 09:35:54 -06:00
Greg Heartsfield
77f35f9f43 feat: server-side pings and disconnects 2022-02-12 16:57:26 -06:00
Greg Heartsfield
9e06cc9482 improvement: better error messages on parse failures 2022-02-12 16:33:29 -06:00
Greg Heartsfield
e66fa4ac42 refactor: remove unnecessary Option wrapping 2022-02-12 16:29:27 -06:00
Greg Heartsfield
99e117f620 improvement: better handling of out-of-protocol messages 2022-02-12 16:26:55 -06:00
Greg Heartsfield
8250e00f05 fix: remove protostream module, and missing NOTICE 2022-02-12 16:22:12 -06:00
Greg Heartsfield
c9f87ec563 docs: NIP-05 feature note in README 2022-02-12 16:19:46 -06:00
Greg Heartsfield
ceaa01e8b4 fix: removed manual nostr stream, so websocket pings work 2022-02-12 16:19:10 -06:00
Greg Heartsfield
bc68cd0c74 build: bump version to 0.5.0 2022-02-12 14:10:44 -06:00
Greg Heartsfield
97589006fa improvement: upgrade dependencies 2022-02-12 14:10:03 -06:00
Greg Heartsfield
e31d0729f2 chore: comment cleanup 2022-02-12 13:49:52 -06:00
Greg Heartsfield
89d96e7ccd improvement: upgraded database schema to drop legacy tables
Database schema is upgraded to version 5.  Legacy event and pubkey
tables are dropped, and indexes are added for NIP-05 verification.
2022-02-12 13:47:03 -06:00
Greg Heartsfield
7056aae227 refactor: create schema module 2022-02-12 09:58:42 -06:00
Greg Heartsfield
753df47443 refactor: create utils/hexrange utility modules 2022-02-12 09:29:38 -06:00
Greg Heartsfield
26a0ce2b32 docs: function/struct comments 2022-02-12 09:29:35 -06:00
Greg Heartsfield
fa66a0265e docs: module headers 2022-02-12 09:29:31 -06:00
Greg Heartsfield
234a8ba0ac feat: limit event publishing to NIP-05 verified users
This adds a new configurable feature to restrict event publishing to
only users with NIP-05 verified metadata.  Domains can be whitelisted
or blacklisted.  Verification expiration and schedules are
configurable.

This upgrades the database to add a table for tracking verification
records.
2022-02-12 09:29:25 -06:00
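A hedged sketch of the kind of settings this introduces in config.toml; all section and key names here are assumptions based on the commit description:

    [verified_users]
    # Require NIP-05 verification before accepting events
    mode = "enabled"
    # Domains whose verified users may publish
    domain_whitelist = ["example.com"]
    # How long a verification remains valid before re-checking
    verify_expiration = "1 week"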
Greg Heartsfield
f679fa0893 build: bump version to 0.4.2 2022-01-30 15:19:41 -06:00
Greg Heartsfield
4cc313fa2d fix: cleanup database connections with same name
When a large number of subscriptions is created with identical names,
we do not send a signal over the abandon-read channel.  This
eventually leads to resource exhaustion.
2022-01-30 15:14:02 -06:00
Greg Heartsfield
6502f7dcd7 fix: do not panic when validating events with malformed pubkeys 2022-01-29 13:19:34 -06:00
Greg Heartsfield
6ca3e3ffea build: bump version to 0.4.1 2022-01-26 21:48:44 -06:00
Greg Heartsfield
49c668a07c improvement: upgrade dependency (h2) 2022-01-26 21:48:11 -06:00
Greg Heartsfield
98c6fa6f39 feat: allow whitelisting of pubkeys for new events
This adds a configuration option, `authorization.pubkey_whitelist`
which is an array of pubkeys that are allowed to publish events on
this relay.
2022-01-26 21:39:03 -06:00
Greg Heartsfield
452bbbb0e5 docs: update feature list (NIP-12, prefix search) 2022-01-26 07:24:04 -06:00
Greg Heartsfield
ee0de6f875 improvement: clearer and less verbose database logging 2022-01-25 21:42:43 -06:00
59 changed files with 15500 additions and 2316 deletions

20
.build.yml Normal file

@@ -0,0 +1,20 @@
image: fedora/latest
arch: x86_64
artifacts:
- nostr-rs-relay/target/release/nostr-rs-relay
environment:
RUST_LOG: debug
packages:
- cargo
- sqlite-devel
- protobuf-compiler
sources:
- https://git.sr.ht/~gheartsfield/nostr-rs-relay/
shell: false
tasks:
- build: |
cd nostr-rs-relay
cargo build --release
- test: |
cd nostr-rs-relay
cargo test --release

2
.cargo/config.toml Normal file

@@ -0,0 +1,2 @@
[build]
rustflags = ["--cfg", "tokio_unstable"]

39
.github/workflows/ci.yml vendored Normal file

@@ -0,0 +1,39 @@
name: Test and build
on:
push:
branches:
- master
jobs:
test_nostr-rs-relay:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Update local toolchain
run: |
sudo apt-get install -y protobuf-compiler
rustup update
rustup component add clippy
rustup install nightly
- name: Toolchain info
run: |
cargo --version --verbose
rustc --version
cargo clippy --version
# - name: Lint
# run: |
# cargo fmt -- --check
# cargo clippy -- -D warnings
- name: Test
run: |
cargo check
cargo test --all
- name: Build
run: |
cargo build --release --locked

5
.gitignore vendored

@@ -1,2 +1,5 @@
/target
**/target/
nostr.db
nostr.db-*
justfile
result

16
.pre-commit-config.yaml Normal file

@@ -0,0 +1,16 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.3.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: check-added-large-files
- repo: https://github.com/doublify/pre-commit-rust
rev: v1.0
hooks:
# - id: fmt
- id: cargo-check
- id: clippy

3565
Cargo.lock generated

File diff suppressed because it is too large

@@ -1,28 +1,68 @@
[package]
name = "nostr-rs-relay"
version = "0.4.0"
version = "0.9.0"
edition = "2021"
authors = ["Greg Heartsfield <scsibug@imap.cc>"]
description = "A relay implementation for the Nostr protocol"
readme = "README.md"
homepage = "https://sr.ht/~gheartsfield/nostr-rs-relay/"
repository = "https://git.sr.ht/~gheartsfield/nostr-rs-relay"
license = "MIT"
keywords = ["nostr", "server"]
categories = ["network-programming", "web-programming"]
[dependencies]
log = "^0.4"
env_logger = "^0.9"
tokio = { version = "^1.14", features = ["full"] }
futures = "^0.3"
futures-util = "^0.3"
tokio-tungstenite = "^0.16"
tungstenite = "^0.16"
thiserror = "^1"
uuid = { version = "^0.8", features = ["v4"] }
config = { version = "0.11", features = ["toml"] }
bitcoin_hashes = { version = "^0.9", features = ["serde"] }
secp256k1 = {git = "https://github.com/rust-bitcoin/rust-secp256k1.git", rev = "50034ccb18fdd84904ab3aa6c84a12fcced33209", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] }
serde = { version = "^1.0", features = ["derive"] }
serde_json = {version = "^1.0", features = ["preserve_order"]}
hex = "^0.4"
rusqlite = { version = "^0.26", features = ["limits"]}
r2d2 = "^0.8"
r2d2_sqlite = "^0.19"
lazy_static = "^1.4"
governor = "^0.4"
nonzero_ext = "^0.3"
hyper={ version="0.14", features=["server","http1","http2","tcp"] }
clap = { version = "4.0.32", features = ["env", "default", "derive"]}
tracing = "0.1.37"
tracing-appender = "0.2.2"
tracing-subscriber = "0.3.16"
tokio = { version = "1", features = ["full", "tracing", "signal"] }
prost = "0.11"
tonic = "0.8.3"
console-subscriber = "0.1.8"
futures = "0.3"
futures-util = "0.3"
tokio-tungstenite = "0.17"
tungstenite = "0.17"
thiserror = "1"
uuid = { version = "1.1.2", features = ["v4"] }
config = { version = "0.12", features = ["toml"] }
bitcoin_hashes = { version = "0.10", features = ["serde"] }
secp256k1 = {version = "0.21", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = {version = "1.0", features = ["preserve_order"]}
hex = "0.4"
rusqlite = { version = "0.26", features = ["limits","bundled","modern_sqlite", "trace"]}
r2d2 = "0.8"
r2d2_sqlite = "0.19"
lazy_static = "1.4"
governor = "0.4"
nonzero_ext = "0.3"
hyper = { version="0.14", features=["client", "server","http1","http2","tcp"] }
hyper-rustls = { version = "0.24" }
http = { version = "0.2" }
parse_duration = "2"
rand = "0.8"
const_format = "0.2.28"
regex = "1"
async-trait = "0.1.60"
async-std = "1.12.0"
sqlx = { version ="0.6.2", features=["runtime-tokio-rustls", "postgres", "chrono"]}
chrono = "0.4.23"
prometheus = "0.13.3"
indicatif = "0.17.3"
bech32 = "0.9.1"
url = "2.3.1"
qrcode = { version = "0.12.0", default-features = false, features = ["svg"] }
nostr = { version = "0.18.0", default-features = false, features = ["base", "nip04", "nip19"] }
log = "0.4"
cln-rpc = "0.1.9"
[target.'cfg(all(not(target_env = "msvc"), not(target_os = "openbsd")))'.dependencies]
tikv-jemallocator = "0.5"
[dev-dependencies]
anyhow = "1"
[build-dependencies]
tonic-build = { version="0.8.3", features = ["prost"] }

@@ -1,22 +1,32 @@
FROM rust:1.57 as builder
FROM docker.io/library/rust:1-bookworm as builder
RUN apt-get update \
&& apt-get install -y cmake protobuf-compiler \
&& rm -rf /var/lib/apt/lists/*
RUN USER=root cargo install cargo-auditable
RUN USER=root cargo new --bin nostr-rs-relay
WORKDIR ./nostr-rs-relay
COPY ./Cargo.toml ./Cargo.toml
COPY ./Cargo.lock ./Cargo.lock
RUN cargo build --release
# build dependencies only (caching)
RUN cargo auditable build --release --locked
# get rid of starter project code
RUN rm src/*.rs
# copy project source code
COPY ./src ./src
COPY ./proto ./proto
COPY ./build.rs ./build.rs
# build auditable release using locked deps
RUN rm ./target/release/deps/nostr*relay*
RUN cargo build --release
RUN cargo auditable build --release --locked
FROM docker.io/library/debian:bookworm-slim
FROM debian:buster-slim
ARG APP=/usr/src/app
ARG APP_DATA=/usr/src/app/db
RUN apt-get update \
&& apt-get install -y ca-certificates tzdata sqlite3 \
&& apt-get install -y ca-certificates tzdata sqlite3 libc6 \
&& rm -rf /var/lib/apt/lists/*
EXPOSE 8080
@@ -36,7 +46,7 @@ RUN chown -R $APP_USER:$APP_USER ${APP}
USER $APP_USER
WORKDIR ${APP}
ENV RUST_LOG=info
ENV RUST_LOG=info,nostr_rs_relay=info
ENV APP_DATA=${APP_DATA}
CMD ./nostr-rs-relay --db ${APP_DATA}

123
README.md

@@ -1,23 +1,42 @@
# [nostr-rs-relay](https://git.sr.ht/~gheartsfield/nostr-rs-relay)
This is a [nostr](https://github.com/fiatjaf/nostr) relay, written in
Rust. It currently supports the entire relay protocol, and has a
SQLite persistence layer.
This is a [nostr](https://github.com/nostr-protocol/nostr) relay,
written in Rust. It currently supports the entire relay protocol, and
persists data with SQLite. There is experimental support for
Postgresql.
The project master repository is available on
[sourcehut](https://sr.ht/~gheartsfield/nostr-rs-relay/), and is
mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay).
[![builds.sr.ht status](https://builds.sr.ht/~gheartsfield/nostr-rs-relay/commits/master.svg)](https://builds.sr.ht/~gheartsfield/nostr-rs-relay/commits/master?)
![Github CI](https://github.com/scsibug/nostr-rs-relay/actions/workflows/ci.yml/badge.svg)
## Features
NIPs with relay-specific implementation requirements are listed here.
[NIPs](https://github.com/nostr-protocol/nips) with a relay-specific implementation are listed here.
- [x] NIP-01: Core event model
- [x] NIP-01: Hide old metadata events
- [x] NIP-02: Hide old contact list events
- [ ] NIP-03: OpenTimestamps
- [ ] NIP-09: Event deletion
- [x] NIP-11: Relay information document
- [x] NIP-01: [Basic protocol flow description](https://github.com/nostr-protocol/nips/blob/master/01.md)
* Core event model
* Hide old metadata events
* Id/Author prefix search
- [x] NIP-02: [Contact List and Petnames](https://github.com/nostr-protocol/nips/blob/master/02.md)
- [ ] NIP-03: [OpenTimestamps Attestations for Events](https://github.com/nostr-protocol/nips/blob/master/03.md)
- [x] NIP-05: [Mapping Nostr keys to DNS-based internet identifiers](https://github.com/nostr-protocol/nips/blob/master/05.md)
- [x] NIP-09: [Event Deletion](https://github.com/nostr-protocol/nips/blob/master/09.md)
- [x] NIP-11: [Relay Information Document](https://github.com/nostr-protocol/nips/blob/master/11.md)
- [x] NIP-12: [Generic Tag Queries](https://github.com/nostr-protocol/nips/blob/master/12.md)
- [x] NIP-15: [End of Stored Events Notice](https://github.com/nostr-protocol/nips/blob/master/15.md)
- [x] NIP-16: [Event Treatment](https://github.com/nostr-protocol/nips/blob/master/16.md)
- [x] NIP-20: [Command Results](https://github.com/nostr-protocol/nips/blob/master/20.md)
- [x] NIP-22: [Event `created_at` limits](https://github.com/nostr-protocol/nips/blob/master/22.md) (_future-dated events only_)
- [ ] NIP-26: [Event Delegation](https://github.com/nostr-protocol/nips/blob/master/26.md) (_implemented, but currently disabled_)
- [x] NIP-28: [Public Chat](https://github.com/nostr-protocol/nips/blob/master/28.md)
- [x] NIP-33: [Parameterized Replaceable Events](https://github.com/nostr-protocol/nips/blob/master/33.md)
- [x] NIP-40: [Expiration Timestamp](https://github.com/nostr-protocol/nips/blob/master/40.md)
- [x] NIP-42: [Authentication of clients to relays](https://github.com/nostr-protocol/nips/blob/master/42.md)
## Quick Start
@@ -26,15 +45,32 @@ application. Use a bind mount to store the SQLite database outside of
the container image, and map the container's 8080 port to a host port
(7000 in the example below).
The examples below start a rootless podman container, mapping a local
data directory and config file.
```console
$ docker build -t nostr-rs-relay .
$ podman build --pull -t nostr-rs-relay .
$ docker run -it -p 7000:8080 \
--mount src=$(pwd)/data,target=/usr/src/app/db,type=bind nostr-rs-relay
$ mkdir data
[2021-12-31T19:58:31Z INFO nostr_rs_relay] listening on: 0.0.0.0:8080
[2021-12-31T19:58:31Z INFO nostr_rs_relay::db] opened database "/usr/src/app/db/nostr.db" for writing
[2021-12-31T19:58:31Z INFO nostr_rs_relay::db] DB version = 2
$ podman unshare chown 100:100 data
$ podman run -it --rm -p 7000:8080 \
--user=100:100 \
-v $(pwd)/data:/usr/src/app/db:Z \
-v $(pwd)/config.toml:/usr/src/app/config.toml:ro,Z \
--name nostr-relay nostr-rs-relay:latest
Nov 19 15:31:15.013 INFO nostr_rs_relay: Starting up from main
Nov 19 15:31:15.017 INFO nostr_rs_relay::server: listening on: 0.0.0.0:8080
Nov 19 15:31:15.019 INFO nostr_rs_relay::server: db writer created
Nov 19 15:31:15.019 INFO nostr_rs_relay::server: control message listener started
Nov 19 15:31:15.019 INFO nostr_rs_relay::db: Built a connection pool "event writer" (min=1, max=4)
Nov 19 15:31:15.019 INFO nostr_rs_relay::db: opened database "/usr/src/app/db/nostr.db" for writing
Nov 19 15:31:15.019 INFO nostr_rs_relay::schema: DB version = 0
Nov 19 15:31:15.054 INFO nostr_rs_relay::schema: database pragma/schema initialized to v7, and ready
Nov 19 15:31:15.054 INFO nostr_rs_relay::schema: All migration scripts completed successfully. Welcome to v7.
Nov 19 15:31:15.521 INFO nostr_rs_relay::db: Built a connection pool "client query" (min=4, max=128)
```
Use a `nostr` client such as
@@ -53,6 +89,48 @@ Text Note [81cf...2652] from 296a...9b92 5 seconds ago
A pre-built container is also available on DockerHub:
https://hub.docker.com/r/scsibug/nostr-rs-relay
## Build and Run (without Docker)
Building `nostr-rs-relay` requires an installation of Cargo & Rust: https://www.rust-lang.org/tools/install
The following OS packages will be helpful; on Debian/Ubuntu:
```console
$ sudo apt-get install build-essential cmake protobuf-compiler pkg-config libssl-dev
```
On OpenBSD:
```console
$ doas pkg_add rust protobuf
```
Clone this repository, and then build a release version of the relay:
```console
$ git clone -q https://git.sr.ht/\~gheartsfield/nostr-rs-relay
$ cd nostr-rs-relay
$ cargo build -q -r
```
The relay executable is now located in
`target/release/nostr-rs-relay`. In order to run it with logging
enabled, execute it with the `RUST_LOG` variable set:
```console
$ RUST_LOG=warn,nostr_rs_relay=info ./target/release/nostr-rs-relay
Dec 26 10:31:56.455 INFO nostr_rs_relay: Starting up from main
Dec 26 10:31:56.464 INFO nostr_rs_relay::server: listening on: 0.0.0.0:8080
Dec 26 10:31:56.466 INFO nostr_rs_relay::server: db writer created
Dec 26 10:31:56.466 INFO nostr_rs_relay::db: Built a connection pool "event writer" (min=1, max=2)
Dec 26 10:31:56.466 INFO nostr_rs_relay::db: opened database "./nostr.db" for writing
Dec 26 10:31:56.466 INFO nostr_rs_relay::schema: DB version = 11
Dec 26 10:31:56.467 INFO nostr_rs_relay::db: Built a connection pool "maintenance writer" (min=1, max=2)
Dec 26 10:31:56.467 INFO nostr_rs_relay::server: control message listener started
Dec 26 10:31:56.468 INFO nostr_rs_relay::db: Built a connection pool "client query" (min=4, max=8)
```
You now have a running relay, on port `8080`. Use a `nostr` client or
`websocat` to connect and send/query for events.
## Configuration
The sample [`config.toml`](config.toml) file demonstrates the
@@ -63,6 +141,7 @@ be mounted into a docker container like so:
$ docker run -it -p 7000:8080 \
--mount src=$(pwd)/config.toml,target=/usr/src/app/config.toml,type=bind \
--mount src=$(pwd)/data,target=/usr/src/app/db,type=bind \
--mount src=$(pwd)/index.html,target=/usr/src/app/index.html,type=bind \
nostr-rs-relay
```
@@ -73,12 +152,18 @@ settings.
For examples of putting the relay behind a reverse proxy (for TLS
termination, load balancing, and other features), see [Reverse
Proxy](reverse-proxy.md).
Proxy](docs/reverse-proxy.md).
## Dev Channel
Current dev discussions for this project are happening at https://discord.gg/ufG6fH52Vk.
Drop in with any development-related questions.
For development discussions, please feel free to use the [sourcehut
mailing list](https://lists.sr.ht/~gheartsfield/nostr-rs-relay-devel).
License
---
This project is MIT licensed.
External Documentation and Links
---
* [BlockChainCaffe's Nostr Relay Setup Guide](https://github.com/BlockChainCaffe/Nostr-Relay-Setup-Guide)

7
build.rs Normal file

@@ -0,0 +1,7 @@
fn main() -> Result<(), Box<dyn std::error::Error>> {
tonic_build::configure()
.build_server(false)
.protoc_arg("--experimental_allow_proto3_optional")
.compile(&["proto/nauthz.proto"], &["proto"])?;
Ok(())
}

@@ -10,25 +10,78 @@ name = "nostr-rs-relay"
# Description
description = "A newly created nostr-rs-relay.\n\nCustomize this with your own info."
# Administrative contact pubkey
# Administrative contact pubkey (32-byte hex, not npub)
#pubkey = "0c2d168a4ae8ca58c9f1ab237b5df682599c6c7ab74307ea8b05684b60405d41"
# Administrative contact URI
#contact = "mailto:contact@example.com"
# Favicon location. Relative to the current directory. Assumes an
# ICO format.
#favicon = "favicon.ico"
# URL of Relay's icon.
#relay_icon = "https://example.test/img.png"
# Path to custom relay html page
#relay_page = "index.html"
[diagnostics]
# Enable tokio tracing (for use with tokio-console)
#tracing = false
[database]
# Database engine (sqlite/postgres). Defaults to sqlite.
# Support for postgres is currently experimental.
#engine = "sqlite"
# Directory for SQLite files. Defaults to the current directory. Can
# also be specified (and overridden) with the "--db dirname" command
# line option.
data_directory = "."
#data_directory = "."
# Database connection pool settings:
# Use an in-memory database instead of 'nostr.db'.
# Requires sqlite engine.
# Caution; this will not survive a process restart!
#in_memory = false
# Database connection pool settings for subscribers:
# Minimum number of SQLite reader connections
#min_conn = 4
#min_conn = 0
# Maximum number of SQLite reader connections
#max_conn = 128
# Maximum number of SQLite reader connections. Recommend setting this
# to approx the number of cores.
#max_conn = 8
# Database connection string. Required for postgres; not used for
# sqlite.
#connection = "postgresql://postgres:nostr@localhost:7500/nostr"
# Optional database connection string for writing. Use this for
# postgres clusters where you want to separate reads and writes to
# different nodes. Ignore for single-database instances.
#connection_write = "postgresql://postgres:nostr@localhost:7500/nostr"
[logging]
# Directory to store log files. Log files roll over daily.
#folder_path = "./log"
#file_prefix = "nostr-relay"
[grpc]
# gRPC interfaces for externalized decisions and other extensions to
# functionality.
#
# Events can be authorized through an external service, by providing
# the URL below. In the event the server is not accessible, events
# will be permitted. The protobuf3 schema used is available in
# `proto/nauthz.proto`.
# event_admission_server = "http://[::1]:50051"
# Set to true if the event admission server denies writes
# in any case (excluding spam filtering).
# This is reflected in the relay information document.
# restricts_write = true
[network]
# Bind to this network address
@@ -37,16 +90,45 @@ address = "0.0.0.0"
# Listen on this port
port = 8080
# If present, read this HTTP header for logging client IP addresses.
# Examples for common proxies, cloudflare:
#remote_ip_header = "x-forwarded-for"
#remote_ip_header = "cf-connecting-ip"
# Websocket ping interval in seconds, defaults to 5 minutes
#ping_interval = 300
[options]
# Reject events that have timestamps greater than this many seconds in
# the future. Defaults to rejecting anything greater than 30 minutes
# from the current time.
# the future. Recommended to reject anything greater than 30 minutes
# from the current time, but the default is to allow any date.
reject_future_seconds = 1800
[limits]
# Limit events created per second, averaged over one minute. Must be
# an integer. If not set (or set to 0), defaults to unlimited.
#messages_per_sec = 0
# an integer. If not set (or set to 0), there is no limit. Note:
# this is for the server as a whole, not per-connection.
#
# Limiting event creation is highly recommended if your relay is
# public!
#
#messages_per_sec = 5
# Limit client subscriptions created, averaged over one minute. Must
# be an integer. If not set (or set to 0), defaults to unlimited.
# Strongly recommended to set this to a low value such as 10 to ensure
# fair service.
#subscriptions_per_min = 0
# UNIMPLEMENTED...
# Limit how many concurrent database connections a client can have.
# This prevents a single client from starting too many expensive
# database queries. Must be an integer. If not set (or set to 0),
# defaults to unlimited (subject to subscription limits).
#db_conns_per_client = 0
# Limit blocking threads used for database connections. Defaults to 16.
#max_blocking_threads = 16
# Limit the maximum size of an EVENT message. Defaults to 128 KB.
# Set to 0 for unlimited.
@@ -59,9 +141,113 @@ reject_future_seconds = 1800
#max_ws_frame_bytes = 131072
# Broadcast buffer size, in number of events. This prevents slow
# readers from consuming memory. Defaults to 4096.
#broadcast_buffer = 4096
# readers from consuming memory.
#broadcast_buffer = 16384
# Event persistence buffer size, in number of events. This provides
# backpressure to senders if writes are slow. Defaults to 16.
#event_persist_buffer = 16
# backpressure to senders if writes are slow.
#event_persist_buffer = 4096
# Event kind blacklist. Events with these kinds will be discarded.
#event_kind_blacklist = [
# 70202,
#]
# Event kind allowlist. Events other than these kinds will be discarded.
#event_kind_allowlist = [
# 0, 1, 2, 3, 7, 40, 41, 42, 43, 44, 30023,
#]
# Reject imprecise requests (kind-only, author-only, etc.)
# This is a temporary measure to improve the adoption of the outbox model.
# It's recommended to have this enabled.
limit_scrapers = false
[authorization]
# Pubkey addresses in this array are whitelisted for event publishing.
# Only valid events by these authors will be accepted, if the variable
# is set.
#pubkey_whitelist = [
# "35d26e4690cbe1a898af61cc3515661eb5fa763b57bd0b42e45099c8b32fd50f",
# "887645fef0ce0c3c1218d2f5d8e6132a19304cdc57cd20281d082f38cfea0072",
#]
# Enable NIP-42 authentication
#nip42_auth = false
# Send DMs (kind 4 and 44) and gift wraps (kind 1059) only to their authenticated recipients
#nip42_dms = false
[verified_users]
# NIP-05 verification of users. Can be "enabled" to require NIP-05
# metadata for event authors, "passive" to perform validation but
# never block publishing, or "disabled" to do nothing.
#mode = "disabled"
# Domain names that will be prevented from publishing events.
#domain_blacklist = ["wellorder.net"]
# Domain names that are allowed to publish events. If defined, only
# events from NIP-05 verified authors at these domains are persisted.
#domain_whitelist = ["example.com"]
# Consider a pubkey "verified" if we have a successful validation
# from the NIP-05 domain within this amount of time. Note, if the
# domain provides a successful response that omits the account,
# verification is immediately revoked.
#verify_expiration = "1 week"
# How long to wait between verification attempts for a specific author.
#verify_update_frequency = "24 hours"
# How many consecutive failed checks before we give up on verifying
# this author.
#max_consecutive_failures = 20
[pay_to_relay]
# Enable pay to relay
#enabled = false
# Node interface to use
#processor = "ClnRest/LNBits"
# The cost to be admitted to relay
#admission_cost = 4200
# The cost in sats per post
#cost_per_event = 0
# Url of node api
#node_url = "<node url>"
# LNBits api secret
#api_secret = "<ln bits api>"
# Path to CLN rune
#rune_path = "<rune path>"
# Nostr direct message on signup
#direct_message=false
# Terms of service
#terms_message = """
#This service (and supporting services) are provided "as is", without warranty of any kind, express or implied.
#
#By using this service, you agree:
#* Not to engage in spam or abuse the relay service
#* Not to disseminate illegal content
#* That requests to delete content cannot be guaranteed
#* To use the service in compliance with all applicable laws
#* To grant necessary rights to your content for unlimited time
#* To be of legal age and have capacity to use this service
#* That the service may be terminated at any time without notice
#* That the content you publish may be removed at any time without notice
#* To have your IP address collected to detect abuse or misuse
#* To cooperate with the relay to combat abuse or misuse
#* You may be exposed to content that you might find triggering or distasteful
#* The relay operator is not liable for content produced by users of the relay
#"""
# Whether or not new sign ups should be allowed
#sign_ups = false
# optional if `direct_message=false`
#secret_key = "<nostr nsec>"

@@ -0,0 +1,14 @@
[Unit]
Description=nostr-rs-relay
[Service]
User=REPLACE_WITH_YOUR_USERNAME
WorkingDirectory=/var/lib/nostr-rs-relay
Environment=RUST_LOG=warn,nostr_rs_relay=info
ExecStart=/usr/bin/nostr-rs-relay --config /etc/nostr-rs-relay/config.toml
TimeoutStopSec=10
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target

@@ -0,0 +1,129 @@
# Database Maintenance
`nostr-rs-relay` uses the SQLite embedded database to minimize
dependencies and overall footprint of running a relay. If traffic is
light, the relay should just run with very little need for
intervention. For heavily trafficked relays, there are a number of
steps that the operator may need to take to maintain performance and
limit disk usage.
This maintenance guide is current as of version `0.8.2`. Future
versions may incorporate and automate some of these steps.
## Backing Up the Database
To prevent data loss, the database should be backed up regularly. The
recommended method is to use the `sqlite3` command to perform an
"Online Backup". This can be done while the relay is running, queries
can still run and events will be persisted during the backup.
The following commands will perform a backup of the database to a
dated file, and then compress to minimize size:
```console
BACKUP_FILE=/var/backups/nostr/`date +%Y%m%d_%H%M`.db
sqlite3 -readonly /apps/nostr-relay/nostr.db ".backup $BACKUP_FILE"
sqlite3 $BACKUP_FILE "vacuum;"
bzip2 -9 $BACKUP_FILE
```
Nostr events are very compressible. Expect a compression ratio on the
order of 4:1, resulting in a 75% space saving.
## Vacuuming the Database
As the database is updated, it can become fragmented. Performing a
full `vacuum` will rebuild the entire database file, and may reduce
its size, especially if a large amount of data was updated or deleted.
```console
vacuum;
```
## Clearing Hidden Events
When events are deleted, the event is not actually removed from the
database. Instead, a flag `HIDDEN` is set to true for the event,
which excludes it from search results. High volume replacements from
profile or other replaceable events are deleted, not hidden, in the
current version of the relay.
In the current version, removing hidden events should not result in
significant space savings, but it can still be used if there is no
desire to hold on to events that can never be re-broadcast.
```console
PRAGMA foreign_keys = ON;
delete from event where HIDDEN=true;
```
## Manually Removing Events
For a variety of reasons, an operator may wish to remove some events
from the database. The only way of achieving this today is with
manually run SQL commands.
It is recommended to have a good backup prior to manually running SQL
commands!
In all cases, it is mandatory to enable foreign keys, and this must be
done for every connection. Otherwise, you will likely orphan rows in
the `tag` table.
### Deleting Specific Event
```console
PRAGMA foreign_keys = ON;
delete from event where event_hash=x'00000000000c1271675dc86e3e1dd1336827bccabb90dc4c9d3b4465efefe00e';
```
### Querying and Deleting All Events for Pubkey
```console
PRAGMA foreign_keys = ON;
select lower(hex(author)) as author, count(*) as c from event group by author order by c asc;
delete from event where author=x'000000000002c7831d9c5a99f183afc2813a6f69a16edda7f6fc0ed8110566e6';
```
### Querying and Deleting All Events of a Kind
```console
PRAGMA foreign_keys = ON;
select printf('%7d', kind), count(*) as c from event group by kind order by c;
delete from event where kind=70202;
```
### Deleting Old Events
In this scenario, we wish to delete any event that has been stored by
our relay for more than 1 month. Crucially, this is based on when the
event was stored, not when the event says it was created. If an event
has a `created` field of 2 years ago, but was first sent to our relay
yesterday, it would not be deleted in this scenario. Keep in mind, we
do not track anything for re-broadcast events that we already have, so
this is not a very effective way of implementing a "least recently
seen" policy.
```console
PRAGMA foreign_keys = ON;
DELETE FROM event WHERE first_seen < CAST(strftime('%s', date('now', '-30 day')) AS INT);
```
### Delete Profile Events with No Recent Events
Many users create profiles, post a "hello world" event, and then never
appear again (likely using an ephemeral keypair that was lost in the
browser cache). We can find these accounts and remove them after some
time.
```console
PRAGMA foreign_keys = ON;
-- One possible approach (a sketch; verify against your schema and
-- take a backup before running): remove profile events whose author
-- has had no events stored in the last 30 days.
DELETE FROM event WHERE kind = 0 AND author NOT IN
  (SELECT DISTINCT author FROM event
   WHERE first_seen >= CAST(strftime('%s', date('now', '-30 day')) AS INT));
```

79
docs/grpc-extensions.md Normal file

@@ -0,0 +1,79 @@
# gRPC Extensions Design Document
The relay will be extensible through gRPC endpoints, definable in the
main configuration file. These will allow external programs to host
logic for deciding things such as whether an event should be persisted,
whether a connection should be allowed, and whether a subscription
request should be registered. The primary goal is to allow for
relay-operator-specific functionality that lets operators serve smaller
communities and reduce spam and abuse.
This will likely evolve substantially; the first goal is to get a
basic one-way service that lets an externalized program decide on
event persistence. This does not represent the final state of gRPC
extensibility in `nostr-rs-relay`.
## Considerations
Write event latency must not be significantly affected. However, the
primary reason we are implementing this is spam/abuse protection, so
we are willing to tolerate some increase in latency if that protects
us against outages!
The interface should provide enough information to make simple
decisions, without burdening the relay with extra queries. The
decision endpoint will be mostly responsible for maintaining state and
gathering additional details.
## Design Overview
A gRPC server may be defined in the `config.toml` file. If it exists,
the relay will attempt to connect to it and send a message for each
`EVENT` command submitted by clients. If a successful response is
returned indicating the event is permitted, the relay continues
processing the event as normal. All existing whitelist, blacklist,
and `NIP-05` validation checks are still performed and MAY still
result in the event being rejected. If a successful response is
returned indicating the decision is anything other than permit, then
the relay MUST reject the event, and return a command result to the
user (using `NIP-20`) indicating the event was blocked (optionally
providing a message).
In the event there is an error in the gRPC interface, event processing
proceeds as if gRPC was disabled (fail open). This allows gRPC
servers to be deployed with minimal chance of causing a full relay
outage.
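
As a sketch of this fail-open rule (the types and function names here
are illustrative, not the relay's actual API):

```rust
// Hedged sketch of the fail-open behavior described above.
#[derive(Debug)]
enum Decision {
    Permit,
    Deny(Option<String>), // optional message to relay back to the client
}

// `admit` stands in for whatever gRPC call the relay makes; any
// transport or server error is treated as if gRPC were disabled.
fn admit_with_fail_open(admit: impl Fn() -> Result<Decision, String>) -> Decision {
    match admit() {
        Ok(decision) => decision,
        Err(_) => Decision::Permit, // gRPC unavailable: fail open
    }
}

fn main() {
    let unreachable_server = || Err("connection refused".to_string());
    println!("{:?}", admit_with_fail_open(unreachable_server));
}
```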
## Design Details
Currently one procedure call is supported, `EventAdmit`, in the
`Authorization` service. It accepts the following data in order to
support authorization decisions:
- The event itself
- The client IP that submitted the event
- The client's HTTP origin header, if one exists
- The client's HTTP user agent header, if one exists
- The public key of the client, if `NIP-42` authentication was
performed (not supported in the relay yet!)
- The `NIP-05` associated with the event's public key, if it is known
to the relay
A server providing authorization decisions will return the following:
- A decision to permit or deny the event
- An optional message that explains why the event was denied, to be
transmitted to the client
## Security Issues
There is little attempt to secure this interface, since it is intended
for use by processes running on the same host. It is recommended to
ensure that the gRPC server providing the API is not exposed to the
public Internet. Authorization server implementations should have
their own security reviews performed.
A slow gRPC server could cause availability issues for event
processing, since this is performed on a single thread. Avoid any
expensive or long-running processes that could result from submitted
events, since any client can initiate a gRPC call to the service.

84
docs/pay-to-relay.md Normal file

@@ -0,0 +1,84 @@
# Pay to Relay Design Document
The relay will use payment as a form of spam prevention. In order to post to the relay, a user must pay a set rate. There is also the option to require a payment for each note posted to the relay. There is no cost to read from the relay.
## Configuration
Currently, [LNBits](https://github.com/lnbits/lnbits) is implemented as the payment processor. LNBits exposes a simple API for creating invoices. To use this API, create a wallet, find "API info" on the right side, and add the invoice/read key to this relay's config file.
The below configuration will need to be added to config.toml
```
[pay_to_relay]
# Enable pay to relay
enabled = true
# The cost to be admitted to relay
admission_cost = 1000
# The cost in sats per post
cost_per_event = 0
# Url of lnbits api
node_url = "https://<IP of node>:5001/api/v1/payments"
# LNBits api secret
api_secret = "<LNbits api key>"
# Terms of service
terms_message = """This service ....
"""
# Whether or not new sign ups should be allowed
sign_ups = true
secret_key = "<nostr secret key to send dms>"
```
The LNBits instance must have a signed HTTPS certificate; a self-signed certificate will not work.
## Design Overview
### Concepts
All authors are initially not admitted to write to the relay. There are two ways to gain write access. The first is by attempting to post to the relay: upon receiving an event from an author that is not admitted, the relay will send a direct message including the relay's terms of service and a lightning invoice for the admission cost. Once this invoice is paid, the author can write to the relay. For this method to work, the author must be reading from the relay. An author can also pay and accept the terms of service via a webpage at `https://<relay-url>/join`.
## Design Details
Authors are stored in a dedicated table. This tracks:
* `pubkey`
* `is_admitted` whether or not the admission invoice has been paid and the terms of service accepted
* `balance` the current balance in sats of the author, used if there is a cost per post
* `tos_accepted_at` the timestamp of when the author accepted the tos
Invoice information is stored in a dedicated table. This tracks:
* `payment_hash` the payment hash of the lightning invoice
* `pubkey` of the author the invoice is issued to
* `invoice` bolt11 invoice
* `amount` in sats
* `status` (Paid/Unpaid/Expired)
* `description`
* `created_at` timestamp of creation
* `confirmed_at` timestamp of payment
### Event Handling
If "pay to relay" is enabled, all incoming events are evaluated to determine whether the author is on the relay's whitelist or if they have paid the admission fee and accepted the terms. If "pay per note" is enabled, there is an additional check to ensure that the author has enough balance, which is then reduced by the cost per note. If the author is on the whitelist, this balance check is not necessary.
### Integration
We have an existing database writer thread, which receives events and
attempts to persist them to disk. Once validated and persisted, these
events are broadcast to all subscribers.
When "pay to relay" is enabled, the writer must check if the author is admitted to post. If the author is not admitted to post the event is forwarded to the payment module. Where an invoice is generated, persisted and broadcast as an direct message to the author.
### Threat Scenarios
Some of these mitigations are fully implemented; others are documented
simply to demonstrate that a mitigation is possible.
### Sign up Spamming
*Threat*: An attacker generates a large number of new pubkeys publishing to the relay, causing a new invoice to be created for each new pubkey.
*Mitigation*: Rate limit the number of new sign ups.
### Admitted Author Spamming
*Threat*: An attacker gains write access by paying the admission fee, and then floods the relay with a large number of spam events.
*Mitigation*: The attacker's admission can be revoked and their admission fee will not be refunded. Enabling "cost per event" and increasing the admission cost can also discourage this type of behavior.

199
docs/reverse-proxy.md Normal file

@@ -0,0 +1,199 @@
# Reverse Proxy Setup Guide
It is recommended to run `nostr-rs-relay` behind a reverse proxy such
as `haproxy`, `nginx` or `traefik` to provide TLS termination. Simple examples
for `haproxy`, `nginx` and `traefik` configurations are documented here.
## Minimal HAProxy Configuration
Assumptions:
* HAProxy version is `2.4.10` or greater (older versions not tested).
* Hostname for the relay is `relay.example.com`.
* Your relay should be available over wss://relay.example.com
* Your (NIP-11) relay info page should be available on https://relay.example.com
* SSL certificate is located in `/etc/certs/example.com.pem`.
* Relay is running on port 8080.
* Limit connections to 400 concurrent.
* HSTS (HTTP Strict Transport Security) is desired.
* Only TLS 1.2 or greater is allowed.
```
global
ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets
frontend fe_prod
mode http
bind :443 ssl crt /etc/certs/example.com.pem alpn h2,http/1.1
bind :80
http-request set-header X-Forwarded-Proto https if { ssl_fc }
redirect scheme https code 301 if !{ ssl_fc }
acl host_relay hdr(host) -i -m beg relay.example.com
use_backend relay if host_relay
# HSTS (1 year)
http-response set-header Strict-Transport-Security max-age=31536000
backend relay
mode http
timeout connect 5s
timeout client 50s
timeout server 50s
timeout tunnel 1h
timeout client-fin 30s
option tcp-check
default-server maxconn 400 check inter 20s fastinter 1s
server relay 127.0.0.1:8080
```
### HAProxy Notes
You may experience WebSocket connection problems with Firefox if
HTTP/2 is enabled, for older versions of HAProxy (2.3.x). Either
disable HTTP/2 (`h2`), or upgrade HAProxy.
## Bare-bones Nginx Configuration
Assumptions:
* `Nginx` version is `1.18.0` (other versions not tested).
* Hostname for the relay is `relay.example.com`.
* SSL certificate and key are located at `/etc/letsencrypt/live/relay.example.com/`.
* Relay is running on port `8080`.
```
http {
server {
listen 443 ssl;
server_name relay.example.com;
ssl_certificate /etc/letsencrypt/live/relay.example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/relay.example.com/privkey.pem;
ssl_protocols TLSv1.3 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ecdh_curve secp521r1:secp384r1;
ssl_ciphers EECDH+AESGCM:EECDH+AES256;
# Optional Diffie-Helmann parameters
# Generate with openssl dhparam -out /etc/ssl/certs/dhparam.pem 4096
#ssl_dhparam /etc/ssl/certs/dhparam.pem;
ssl_session_cache shared:TLS:2m;
ssl_buffer_size 4k;
# OCSP stapling
ssl_stapling on;
ssl_stapling_verify on;
resolver 1.1.1.1 1.0.0.1 [2606:4700:4700::1111] [2606:4700:4700::1001]; # Cloudflare
# Set HSTS to 365 days
add_header Strict-Transport-Security 'max-age=31536000; includeSubDomains; preload' always;
keepalive_timeout 70;
location / {
proxy_pass http://localhost:8080;
proxy_http_version 1.1;
proxy_read_timeout 1d;
proxy_send_timeout 1d;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Host $host;
}
}
}
```
### Nginx Notes
The above configuration was tested on `nginx` `1.18.0` on `Ubuntu` `20.04` and `22.04`
For help installing `nginx` on `Ubuntu`, see [this guide](https://www.digitalocean.com/community/tutorials/how-to-install-nginx-on-ubuntu-20-04).
For guidance on using `letsencrypt` to obtain a cert on `Ubuntu`, including an `nginx` plugin, see [this post](https://www.digitalocean.com/community/tutorials/how-to-secure-nginx-with-let-s-encrypt-on-ubuntu-20-04).
## Example Traefik Configuration
Assumptions:
* `Traefik` version is `2.9` (other versions not tested).
* `Traefik` is used for provisioning of Let's Encrypt certificates.
* `Traefik` is running in `Docker`, using `docker compose` and labels for the static configuration. An equivalent setup using a Traefik config file is possible too (but not covered here).
* Strict Transport Security is enabled.
* Hostname for the relay is `relay.example.com`, email address for ACME certificates provider is `name@example.com`.
* ipv6 is enabled, a viable private ipv6 subnet is specified in the example below.
* Relay is running on port `8080`.
```
version: '3'
networks:
nostr:
enable_ipv6: true
ipam:
config:
- subnet: fd00:db8:a::/64
gateway: fd00:db8:a::1
services:
traefik:
image: traefik:v2.9
networks:
nostr:
command:
- "--log.level=ERROR"
# letsencrypt configuration
- "--certificatesResolvers.http.acme.email==name@example.com"
- "--certificatesResolvers.http.acme.storage=/certs/acme.json"
- "--certificatesResolvers.http.acme.httpChallenge.entryPoint=http"
# define entrypoints
- "--entryPoints.http.address=:80"
- "--entryPoints.http.http.redirections.entryPoint.to=https"
- "--entryPoints.http.http.redirections.entryPoint.scheme=https"
- "--entryPoints.https.address=:443"
- "--entryPoints.https.forwardedHeaders.insecure=true"
- "--entryPoints.https.proxyProtocol.insecure=true"
# docker provider (get configuration from container labels)
- "--providers.docker.endpoint=unix:///var/run/docker.sock"
- "--providers.docker.exposedByDefault=false"
- "--providers.file.directory=/config"
- "--providers.file.watch=true"
ports:
- "80:80"
- "443:443"
volumes:
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "$(pwd)/traefik/certs:/certs"
- "$(pwd)/traefik/config:/config"
logging:
driver: "local"
restart: always
# example nostr config. only labels: section is relevant for Traefik config
nostr:
image: nostr-rs-relay:latest
container_name: nostr-relay
networks:
nostr:
restart: always
user: 100:100
volumes:
- '$(pwd)/nostr/data:/usr/src/app/db:Z'
- '$(pwd)/nostr/config/config.toml:/usr/src/app/config.toml:ro,Z'
labels:
- "traefik.enable=true"
- "traefik.http.routers.nostr.entrypoints=https"
- "traefik.http.routers.nostr.rule=Host(`relay.example.com`)"
- "traefik.http.routers.nostr.tls.certresolver=http"
- "traefik.http.routers.nostr.service=nostr"
- "traefik.http.services.nostr.loadbalancer.server.port=8080"
- "traefik.http.services.nostr.loadbalancer.passHostHeader=true"
- "traefik.http.middlewares.nostr.headers.sslredirect=true"
- "traefik.http.middlewares.nostr.headers.stsincludesubdomains=true"
- "traefik.http.middlewares.nostr.headers.stspreload=true"
- "traefik.http.middlewares.nostr.headers.stsseconds=63072000"
- "traefik.http.routers.nostr.middlewares=nostr"
```
### Traefik Notes
Traefik will take care of the provisioning and renewal of certificates. In case of an ipv4-only relay, simply delete the `enable_ipv6:` and `ipam:` entries in the `networks:` section of the docker-compose file.

@@ -0,0 +1,40 @@
# Run as a linux system process
Docker makes it easy to spin up and down environments but it's also possible to run `nostr-rs-relay` as a systemd linux process.
This guide assumes you're on a Linux machine and that Rust is already installed.
## Instructions
### Build nostr-rs-relay from source
Start by building the application from source. Here is how to do that:
1. `git clone https://github.com/scsibug/nostr-rs-relay.git`
2. `cd nostr-rs-relay`
3. `cargo build --release`
### Place the files where they belong
We want to place the nostr-rs-relay binary and the config.toml file where they belong. While still in the root level of the nostr-rs-relay folder you cloned in the last step, run the following commands:
1. `sudo cp target/release/nostr-rs-relay /usr/local/bin/`
2. `sudo mkdir /etc/nostr-rs-relay`
3. `sudo cp config.toml /etc/nostr-rs-relay`
### Create the Systemd service file
We need to create a new Systemd service file. These files are placed in the `/etc/systemd/system/` folder where you will find many other services running.
1. `sudo vim /etc/systemd/system/nostr-rs-relay.service`
2. Paste in the contents of [this service file](../contrib/nostr-rs-relay.service). Remember to replace the `User` value with your own username.
3. Save the file and exit your text editor
### Run the service
To get the service running, we need to reload the systemd daemon and enable the service.
1. `sudo systemctl daemon-reload`
2. `sudo systemctl start nostr-rs-relay.service`
3. `sudo systemctl enable nostr-rs-relay.service`
4. `sudo systemctl status nostr-rs-relay.service`
### Tips
#### Logs
The application will write logs to the journal. To read it, execute `sudo journalctl -f -u nostr-rs-relay`

@@ -0,0 +1,248 @@
# Author Verification Design Document
The relay will use NIP-05 DNS-based author verification to limit which
authors can publish events to a relay. This document describes how
this feature will operate.
## Considerations
DNS-based author verification is designed to be deployed in relays that
want to prevent spam, so there should be strong protections to prevent
unauthorized authors from persisting data. This includes data needed to
verify new authors.
There should be protections in place to ensure the relay cannot be
used to spam or flood other webservers. Additionally, there should be
protections against server-side request forgery (SSRF).
## Design Overview
### Concepts
All authors are initially "unverified". Unverified authors that submit
appropriate `NIP-05` metadata events become "candidates" for
verification. A candidate author becomes verified when the relay
inspects a kind `0` metadata event for the author with a `nip05` field,
and follows the procedure in `NIP-05` to successfully associate the
author with an internet identifier.
The `NIP-05` procedure verifies an author for a fixed period of time,
configurable by the relay operator. If this "verification expiration
time" (`verify_expiration`) is exceeded without being refreshed, they
are once again unverified.
Verified authors have their status regularly and automatically updated
through scheduled polling to their verified domain, this process is
"re-verification". It is performed based on the configuration setting
`verify_update_frequency`, which defines how long the relay waits
between verification attempts (whether the result was success or
failure).
Authors may change their verification data (the internet identifier from
`NIP-05`) with a new metadata event, which then requires
re-verification. Their old verification remains valid until
expiration.
Performing candidate author verification is a best-effort activity and
may be significantly rate-limited to prevent relays being used to
attack other hosts. Candidate verification (untrusted authors) should
never impact re-verification (trusted authors).
## Operating Modes
The relay may operate in one of three modes. "Disabled" performs no
validation activities, and will never permit or deny events based on
an author's NIP-05 metadata. "Passive" performs NIP-05 validation,
but does not permit or deny events based on the validity or presence
of NIP-05 metadata. "Enabled" will require current and valid NIP-05
metadata for any events to be persisted. "Enabled" mode will
additionally consider domain whitelist/blacklist configuration data to
restrict which authors' events are persisted.
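
For illustration, these modes map naturally onto an enum (a sketch; the
relay's actual configuration type may differ):

```rust
// Sketch only; not the relay's actual configuration type.
#[derive(Debug, PartialEq)]
enum VerifiedUsersMode {
    Disabled, // no validation; never permit or deny on NIP-05 status
    Passive,  // validate, but never permit or deny on NIP-05 status
    Enabled,  // require current, valid NIP-05 metadata to persist events
}

fn parse_mode(s: &str) -> VerifiedUsersMode {
    match s {
        "enabled" => VerifiedUsersMode::Enabled,
        "passive" => VerifiedUsersMode::Passive,
        _ => VerifiedUsersMode::Disabled, // "disabled" or anything unrecognized
    }
}

fn main() {
    assert_eq!(parse_mode("passive"), VerifiedUsersMode::Passive);
}
```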
## Design Details
### Data Storage
Verification is stored in a dedicated table. This tracks:
* `nip05` identifier
* most recent verification timestamp
* most recent verification failure timestamp
* reference to the metadata event (used for tracking `created_at` and
`pubkey`)
### Event Handling
All events are first validated to ensure the signature is valid.
Incoming events of kind _other_ than metadata (kind `0`) submitted by
clients will be evaluated as follows.
* If the event's author has a current verification, the event is
persisted as normal.
* If the event's author has either no verification, or the
verification is expired, the event is rejected.
If the event is a metadata event, we handle it differently.
We first determine the verification status of the event's pubkey.
* If the event author is unverified, AND the event contains a `nip05`
key, we consider this a verification candidate.
* If the event author is unverified, AND the event does not contain a
`nip05` key, this is not a candidate, and the event is dropped.
* If the event author is verified, AND the event contains a `nip05`
key that is identical to the currently stored value, no special
action is needed.
* If the event author is verified, AND the event contains a different
`nip05` than was previously verified, with a more recent timestamp,
we need to re-verify.
* If the event author is verified, AND the event is missing a `nip05`
key, and the event timestamp is more recent than what was verified,
we do nothing. The current verification will be allowed to expire.
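
These rules amount to a small decision function; here is a hedged
sketch under assumed types (not the relay's internal structures):

```rust
// Sketch of the metadata-event rules above; the types are assumptions
// made for illustration.
#[derive(Debug, PartialEq)]
enum MetadataAction {
    Candidate, // unverified author supplied a nip05: attempt verification
    Drop,      // unverified author, no nip05: not a candidate
    Persist,   // verified author, nip05 unchanged: no special action
    ReVerify,  // verified author, newer event with a different nip05
    Ignore,    // verified author, no (or stale) nip05: let verification expire
}

struct CurrentVerification<'a> {
    nip05: &'a str,
    event_created_at: u64, // created_at of the metadata event we verified
}

fn classify(
    current: Option<&CurrentVerification<'_>>,
    nip05: Option<&str>,
    created_at: u64,
) -> MetadataAction {
    match (current, nip05) {
        (None, Some(_)) => MetadataAction::Candidate,
        (None, None) => MetadataAction::Drop,
        (Some(cur), Some(n)) if n == cur.nip05 => MetadataAction::Persist,
        (Some(cur), Some(_)) if created_at > cur.event_created_at => MetadataAction::ReVerify,
        _ => MetadataAction::Ignore,
    }
}

fn main() {
    assert_eq!(classify(None, Some("user@example.com"), 100), MetadataAction::Candidate);
    assert_eq!(classify(None, None, 100), MetadataAction::Drop);
}
```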
### Candidate Verification
When a candidate verification is requested, a rate limit will be
utilized. If the rate limit is exceeded, new candidate verification
requests will be dropped. In practice, this is implemented by a
size-limited channel that drops events that exceed a threshold.
Candidates are never persisted in the database.
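
The size-limited channel can be illustrated with a bounded tokio
channel, where `try_send` drops candidate verifications once the buffer
is full (assumed types; not the relay's actual code):

```rust
use tokio::sync::mpsc;

// Hypothetical candidate type, for illustration only.
#[derive(Debug)]
struct Candidate {
    pubkey: String,
    nip05: String,
}

#[tokio::main]
async fn main() {
    // A small, fixed-size buffer acts as the rate-limit threshold.
    let (tx, mut rx) = mpsc::channel::<Candidate>(16);
    let candidate = Candidate {
        pubkey: "abc123".to_string(),
        nip05: "user@example.com".to_string(),
    };
    // try_send never blocks; if the buffer is full, the candidate
    // verification request is simply dropped.
    if tx.try_send(candidate).is_err() {
        println!("verification queue full; dropping candidate");
    }
    drop(tx); // close the channel so the loop below terminates
    while let Some(c) = rx.recv().await {
        println!("would verify {:?}", c);
    }
}
```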
### Re-Verification
Re-verification is straightforward when there has been no change to
the `nip05` key. A new request to the `nip05` domain is performed,
and if successful, the verification timestamp is updated to the
current time. If the request fails due to a timeout or server error,
the failure timestamp is updated instead.
When the `nip05` key has changed and this event is more recent, we
will create a new verification record, and delete all other records
for the same name.
Regarding creating new records vs. updating: We never update the event
reference or `nip05` identifier in a verification record. Every update
either resets the last failure or the last success timestamp.
### Determining Verification Status
In determining if an event is from a verified author, the following
procedure should be used:
Join the verification table with the event table, to provide
verification data alongside the event `created_at` and `pubkey`
metadata. Find the most recent verification record for the author,
based on the `created_at` time.
Reject the record if the success timestamp is not within our
configured expiration time.
Reject records with disallowed domains, based on any whitelists or
blacklists in effect.
If a result remains, the author is treated as verified.
This does give a time window for authors transitioning their verified
status between domains. There may be a period of time in which there
are multiple valid rows in the verification table for a given author.
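
A sketch of that status check, including the domain filtering described
below (the types here are assumptions, not the relay's implementation):

```rust
// Assumed shape of a verification record, for illustration only.
struct Verification {
    domain: String,
    last_success: Option<u64>, // UNIX timestamp of last successful check
    created_at: u64,           // created_at of the associated metadata event
}

// If a whitelist is provided it alone decides; otherwise a blacklist
// may deny; with neither, all domains are allowed.
fn domain_allowed(domain: &str, whitelist: Option<&[String]>, blacklist: Option<&[String]>) -> bool {
    if let Some(wl) = whitelist {
        return wl.iter().any(|d| d == domain);
    }
    if let Some(bl) = blacklist {
        return !bl.iter().any(|d| d == domain);
    }
    true
}

fn is_verified(
    records: &[Verification],
    now: u64,
    expiration_secs: u64,
    whitelist: Option<&[String]>,
    blacklist: Option<&[String]>,
) -> bool {
    // Most recent record first, based on the metadata event's created_at.
    records.iter().max_by_key(|r| r.created_at).map_or(false, |r| {
        let fresh = r
            .last_success
            .map_or(false, |t| now.saturating_sub(t) <= expiration_secs);
        fresh && domain_allowed(&r.domain, whitelist, blacklist)
    })
}

fn main() {
    let recs = vec![Verification {
        domain: "example.com".to_string(),
        last_success: Some(1_000),
        created_at: 42,
    }];
    // Verified: last success was 500s ago, within a one-week expiration.
    println!("{}", is_verified(&recs, 1_500, 604_800, None, None));
}
```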
### Cleaning Up Inactive Verifications
After an author's verification has expired, we will continue to check for
it to become valid again. After a configurable number of attempts, we
should simply forget it, and reclaim the space.
### Addition of Domain Whitelist/Blacklist
A set of whitelisted or blacklisted domains may be provided. If both
are provided, only the whitelist is used. In this context, domains
are either "allowed" (present on a whitelist and NOT present on a
blacklist), or "denied" (NOT present on a whitelist and present on a
blacklist).
The processes outlined so far are modified in the presence of these
options:
* Only authors with allowed domains can become candidates for
verification.
* Verification status queries additionally filter out any denied
domains.
* Re-verification processes only proceed with allowed domains.
### Integration
We have an existing database writer thread, which receives events and
attempts to persist them to disk. Once validated and persisted, these
events are broadcast to all subscribers.
When verification is enabled, the writer must check to ensure a valid,
unexpired verification record exists for the author. All metadata
events (regardless of verification status) are forwarded to a verifier
module. If the verifier determines a new verification record is
needed, it is also responsible for persisting and broadcasting the
event, just as the database writer would have done.
## Threat Scenarios
Some of these mitigations are fully implemented; others are documented
simply to demonstrate a mitigation is possible.
### Domain Spamming
*Threat*: An author with a high volume of events creates a metadata event
with a bogus domain, causing the relay to generate significant
unwanted traffic to a target.
*Mitigation*: Rate limiting for all candidate verification will limit
external requests to a reasonable amount. Currently, this is a simple
delay that slows down the HTTP task.
### Denial of Service for Legitimate Authors
*Threat*: An author with a high volume of events creates a metadata event
with a domain that is invalid for them, _but which is used by other
legitimate authors_. This triggers rate-limiting against the legitimate
domain, and blocks authors from updating their own metadata.
*Mitigation*: Rate limiting should only apply to candidates, so any
existing verified authors have priority for re-verification. New
authors will be affected, as we cannot distinguish between the threat
and a legitimate author. _(Unimplemented)_
### Denial of Service by Consuming Storage
*Threat*: An author creates a high volume of random metadata events with
unique domains, in order to cause us to store large amounts of data
for to-be-verified authors.
*Mitigation*: No data is stored for candidate authors. This makes it
harder for new authors to become verified, but is effective at
preventing this attack.
### Metadata Replay for Verified Author
*Threat*: An attacker replays an out-of-date metadata event for an author, to
cause a verification to fail.
*Mitigation*: New metadata events have their signed timestamp compared
against the signed timestamp of the event that has most recently
verified them. If the metadata event is older, it is discarded.
### Server-Side Request Forgery via Metadata
*Threat*: Attacker includes malicious data in the `nip05` event, which
is used to generate HTTP requests against potentially internal
resources. Either leaking data, or invoking webservices beyond their
own privileges.
*Mitigation*: Consider detecting and dropping when the `nip05` field
is an IP address. Allow the relay operator to utilize the `blacklist`
or `whitelist` to constrain hosts that will be contacted. Most
importantly, the verification process is hardcoded to only make
requests to a known url path
(`.well-known/nostr.json?name=<LOCAL_NAME>`). The `<LOCAL_NAME>`
component is restricted to a basic ASCII subset (preventing additional
URL components).
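
As an illustration of that restriction, a local-name check could look
like the following (the exact character set is an assumption based on
NIP-05's `a-z0-9-_.` convention):

```rust
// Restrict <LOCAL_NAME> to a basic ASCII subset before building the
// .well-known URL; the set here follows NIP-05's a-z0-9-_. rule.
fn valid_local_name(name: &str) -> bool {
    !name.is_empty()
        && name
            .chars()
            .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || matches!(c, '-' | '_' | '.'))
}

fn main() {
    assert!(valid_local_name("bob"));
    assert!(!valid_local_name("bob/../admin")); // path traversal attempt
    assert!(!valid_local_name(""));
}
```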

1010
examples/nauthz/Cargo.lock generated Normal file

File diff suppressed because it is too large

@@ -0,0 +1,13 @@
[package]
name = "nauthz-server"
version = "0.1.0"
edition = "2021"
[dependencies]
# Common dependencies
tokio = { version = "1.0", features = ["rt-multi-thread", "macros"] }
prost = "0.11"
tonic = "0.8.3"
[build-dependencies]
tonic-build = { version="0.8.3", features = ["prost"] }

7
examples/nauthz/build.rs Normal file

@@ -0,0 +1,7 @@
fn main() -> Result<(), Box<dyn std::error::Error>> {
tonic_build::configure()
.build_server(true)
.protoc_arg("--experimental_allow_proto3_optional")
.compile(&["../../proto/nauthz.proto"], &["../../proto"])?;
Ok(())
}

@@ -0,0 +1,60 @@
use tonic::{transport::Server, Request, Response, Status};
use nauthz_grpc::authorization_server::{Authorization, AuthorizationServer};
use nauthz_grpc::{Decision, EventReply, EventRequest};
pub mod nauthz_grpc {
tonic::include_proto!("nauthz");
}
#[derive(Default)]
pub struct EventAuthz {
allowed_kinds: Vec<u64>,
}
#[tonic::async_trait]
impl Authorization for EventAuthz {
async fn event_admit(
&self,
request: Request<EventRequest>,
) -> Result<Response<EventReply>, Status> {
let reply;
let req = request.into_inner();
let event = req.event.unwrap();
let content_prefix: String = event.content.chars().take(40).collect();
println!("recvd event, [kind={}, origin={:?}, nip05_domain={:?}, tag_count={}, content_sample={:?}]",
event.kind, req.origin, req.nip05.map(|x| x.domain), event.tags.len(), content_prefix);
// Permit any event with a whitelisted kind
if self.allowed_kinds.contains(&event.kind) {
println!("This looks fine! (kind={})", event.kind);
reply = nauthz_grpc::EventReply {
decision: Decision::Permit as i32,
message: None,
};
} else {
println!("Blocked! (kind={})", event.kind);
reply = nauthz_grpc::EventReply {
decision: Decision::Deny as i32,
message: Some(format!("kind {} not permitted", event.kind)),
};
}
Ok(Response::new(reply))
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let addr = "[::1]:50051".parse().unwrap();
// A simple authorization engine that allows kinds 0-3
let checker = EventAuthz {
allowed_kinds: vec![0, 1, 2, 3],
};
println!("EventAuthz Server listening on {}", addr);
// Start serving
Server::builder()
.add_service(AuthorizationServer::new(checker))
.serve(addr)
.await?;
Ok(())
}
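
To exercise this server, a relay-side call would look roughly like the
tonic client below. This is a hypothetical test client (the
`nauthz_grpc` module is generated from the same proto file); it is not
part of the repository:

```rust
use nauthz_grpc::authorization_client::AuthorizationClient;
use nauthz_grpc::{Event, EventRequest};
pub mod nauthz_grpc {
    tonic::include_proto!("nauthz");
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = AuthorizationClient::connect("http://[::1]:50051").await?;
    // A dummy kind-1 event; a real relay would fill in actual event data.
    let request = EventRequest {
        event: Some(Event {
            id: vec![0; 32],
            pubkey: vec![0; 32],
            created_at: 1_671_000_000,
            kind: 1,
            content: "hello".to_string(),
            tags: vec![],
            sig: vec![0; 64],
        }),
        ip_addr: Some("127.0.0.1".to_string()),
        origin: None,
        user_agent: None,
        auth_pubkey: None,
        nip05: None,
    };
    let reply = client.event_admit(request).await?.into_inner();
    println!("decision={}, message={:?}", reply.decision, reply.message);
    Ok(())
}
```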

82
flake.lock generated Normal file

@@ -0,0 +1,82 @@
{
"nodes": {
"crane": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1719249093,
"narHash": "sha256-0q1haa3sw6GbmJ+WhogMnducZGjEaCa/iR6hF2vq80I=",
"owner": "ipetkov",
"repo": "crane",
"rev": "9791c77eb7e98b8d8ac5b0305d47282f994411ca",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1719254875,
"narHash": "sha256-ECni+IkwXjusHsm9Sexdtq8weAq/yUyt1TWIemXt3Ko=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "2893f56de08021cffd9b6b6dfc70fd9ccd51eb60",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"crane": "crane",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

44
flake.nix Normal file

@@ -0,0 +1,44 @@
{
description = "Nostr Relay written in Rust";
inputs = {
nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
flake-utils.url = "github:numtide/flake-utils";
crane = {
url = "github:ipetkov/crane";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs = inputs@{ self, ... }:
inputs.flake-utils.lib.eachDefaultSystem (system:
let
pkgs = inputs.nixpkgs.legacyPackages.${system};
craneLib = inputs.crane.mkLib pkgs;
src = pkgs.lib.cleanSourceWith {
src = ./.;
filter = path: type:
(pkgs.lib.hasSuffix "\.proto" path) ||
# Default filter from crane (allow .rs files)
(craneLib.filterCargoSources path type)
;
};
crate = craneLib.buildPackage {
name = "nostr-rs-relay";
inherit src;
nativeBuildInputs = [ pkgs.pkg-config pkgs.protobuf ];
};
in
{
checks = {
inherit crate;
};
packages.default = crate;
formatter = pkgs.nixpkgs-fmt;
devShells.default = craneLib.devShell {
checks = self.checks.${system};
};
});
}

60
proto/nauthz.proto Normal file

@@ -0,0 +1,60 @@
syntax = "proto3";
// Nostr Authorization Services
package nauthz;
// Authorization for actions against a relay
service Authorization {
// Determine if an event should be admitted to the relay
rpc EventAdmit(EventRequest) returns (EventReply) {}
}
message Event {
bytes id = 1; // 32-byte SHA256 hash of serialized event
bytes pubkey = 2; // 32-byte public key of event creator
fixed64 created_at = 3; // UNIX timestamp provided by event creator
uint64 kind = 4; // event kind
string content = 5; // arbitrary event contents
repeated TagEntry tags = 6; // event tag array
bytes sig = 7; // 32-byte signature of the event id
// Individual values for a single tag
message TagEntry {
repeated string values = 1;
}
}
// Event data and metadata for authorization decisions
message EventRequest {
Event event =
1; // the event to be admitted for further relay processing
optional string ip_addr =
2; // IP address of the client that submitted the event
optional string origin =
3; // HTTP origin header from the client, if one exists
optional string user_agent =
4; // HTTP user-agent header from the client, if one exists
optional bytes auth_pubkey =
5; // the public key associated with a NIP-42 AUTH'd session, if
// authentication occurred
optional Nip05Name nip05 =
6; // NIP-05 address associated with the event pubkey, if it is
// known and has been validated by the relay
// A NIP_05 verification record
message Nip05Name {
string local = 1;
string domain = 2;
}
}
// A permit or deny decision
enum Decision {
DECISION_UNSPECIFIED = 0;
DECISION_PERMIT = 1; // Admit this event for further processing
DECISION_DENY = 2; // Deny persisting or propagating this event
}
// Response to a event authorization request
message EventReply {
Decision decision = 1; // decision to enforce
optional string message = 2; // informative message for the client
}

@@ -1,53 +0,0 @@
# Reverse Proxy Setup Guide
It is recommended to run `nostr-rs-relay` behind a reverse proxy such
as `haproxy` or `nginx` to provide TLS termination. A simple example
of an `haproxy` configuration is documented here.
## Minimal HAProxy Configuration
Assumptions:
* HAProxy version is `2.4.10` or greater (older versions not tested).
* Hostname for the relay is `relay.example.com`.
* Your relay should be available over wss://relay.example.com
* Your (NIP-11) relay info page should be available on https://relay.example.com
* SSL certificate is located in `/etc/certs/example.com.pem`.
* Relay is running on port 8080.
* Limit connections to 400 concurrent.
* HSTS (HTTP Strict Transport Security) is desired.
* Only TLS 1.2 or greater is allowed.
```
global
ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets
frontend fe_prod
mode http
bind :443 ssl crt /etc/certs/example.com.pem alpn h2,http/1.1
bind :80
http-request set-header X-Forwarded-Proto https if { ssl_fc }
redirect scheme https code 301 if !{ ssl_fc }
acl host_relay hdr(host) -i relay.example.com
use_backend relay if host_relay
# HSTS (1 year)
http-response set-header Strict-Transport-Security max-age=31536000
backend relay
mode http
timeout connect 5s
timeout client 50s
timeout server 50s
timeout tunnel 1h
timeout client-fin 30s
option tcp-check
default-server maxconn 400 check inter 20s fastinter 1s
server relay 127.0.0.1:8080
```
### Notes
With older versions of HAProxy (2.3.x), you may experience WebSocket
connection problems with Firefox when HTTP/2 is enabled. Either
disable HTTP/2 (`h2`), or upgrade HAProxy.

@@ -1 +1,4 @@
edition = "2018"
edition = "2021"
#max_width = 140
#chain_width = 100
#fn_call_width = 100

180
src/bin/bulkloader.rs Normal file

@@ -0,0 +1,180 @@
use nostr_rs_relay::config;
use nostr_rs_relay::error::{Error, Result};
use nostr_rs_relay::event::{single_char_tagname, Event};
use nostr_rs_relay::repo::sqlite::{build_pool, PooledConnection};
use nostr_rs_relay::repo::sqlite_migration::{curr_db_version, DB_VERSION};
use nostr_rs_relay::utils::is_lower_hex;
use rusqlite::params;
use rusqlite::{OpenFlags, Transaction};
use std::io;
use std::path::Path;
use std::sync::mpsc;
use std::thread;
use tracing::info;
/// Bulk load JSONL data from STDIN to the database specified in config.toml (or ./nostr.db as a default).
/// The database must already exist; this will not create a new one.
/// Tested against schema v13.
pub fn main() -> Result<()> {
let _trace_sub = tracing_subscriber::fmt::try_init();
println!("Nostr-rs-relay Bulk Loader");
// check for a database file, or create one.
let settings = config::Settings::new(&None)?;
if !Path::new(&settings.database.data_directory).is_dir() {
info!("Database directory does not exist");
return Err(Error::DatabaseDirError);
}
// Get a database pool
let pool = build_pool(
"bulk-loader",
&settings,
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
1,
4,
false,
);
{
// check for database schema version
let mut conn: PooledConnection = pool.get()?;
let version = curr_db_version(&mut conn)?;
info!("current version is: {:?}", version);
// ensure the schema version is current.
if version != DB_VERSION {
info!("version is not current, exiting");
panic!("cannot write to schema other than v{DB_VERSION}");
}
}
// this channel will contain parsed events ready to be inserted
let (event_tx, event_rx) = mpsc::sync_channel(100_000);
// Thread for reading events
let _stdin_reader_handler = thread::spawn(move || {
let stdin = io::stdin();
for readline in stdin.lines() {
if let Ok(line) = readline {
// try to parse a nostr event
let eres: Result<Event, serde_json::Error> = serde_json::from_str(&line);
if let Ok(mut e) = eres {
if let Ok(()) = e.validate() {
e.build_index();
//debug!("Event: {:?}", e);
event_tx.send(Some(e)).ok();
} else {
info!("could not validate event");
}
} else {
info!("error reading event: {:?}", eres);
}
} else {
// error reading
info!("error reading: {:?}", readline);
}
}
info!("finished parsing events");
event_tx.send(None).ok();
let ok: Result<()> = Ok(());
ok
});
let mut conn: PooledConnection = pool.get()?;
let mut events_read = 0;
let event_batch_size = 50_000;
let mut new_events = 0;
let mut has_more_events = true;
while has_more_events {
// begin a transaction
let tx = conn.transaction()?;
// read in batch_size events and commit
for _ in 0..event_batch_size {
match event_rx.recv() {
Ok(Some(e)) => {
events_read += 1;
// ignore ephemeral events
if !(e.kind >= 20000 && e.kind < 30000) {
match write_event(&tx, e) {
Ok(c) => {
new_events += c;
}
Err(e) => {
info!("error inserting event: {:?}", e);
}
}
}
}
Ok(None) => {
// signal that the sender will never produce more
// events
has_more_events = false;
break;
}
Err(_) => {
info!("sender is closed");
// sender is done
}
}
}
info!("committed {} events...", new_events);
tx.commit()?;
conn.execute_batch("pragma wal_checkpoint(truncate)")?;
}
info!("processed {} events", events_read);
info!("stored {} new events", new_events);
// get a connection for writing events
// read standard in.
info!("finished reading input");
Ok(())
}
/// Write an event and update the tag table.
/// Assumes the event has its index built.
fn write_event(tx: &Transaction, e: Event) -> Result<usize> {
let id_blob = hex::decode(&e.id).ok();
let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok();
let delegator_blob: Option<Vec<u8>> = e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
let event_str = serde_json::to_string(&e).ok();
// ignore if the event hash is a duplicate.
let ins_count = tx.execute(
"INSERT OR IGNORE INTO event (event_hash, created_at, kind, author, delegated_by, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, ?6, strftime('%s','now'), FALSE);",
params![id_blob, e.created_at, e.kind, pubkey_blob, delegator_blob, event_str]
)?;
if ins_count == 0 {
return Ok(0);
}
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
let event_id = tx.last_insert_rowid();
// look at each event, and each tag, creating new tag entries if appropriate.
for t in e.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.first().unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
}
// safe because len was > 1
let tagval = t.get(1).unwrap();
// insert as BLOB if we can restore it losslessly.
// this means it needs to be even length and lowercase.
if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
tx.execute(
"INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
params![event_id, tagname, hex::decode(tagval).ok()],
)?;
} else {
// otherwise, insert as text
tx.execute(
"INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
params![event_id, tagname, &tagval],
)?;
}
}
if e.is_replaceable() {
//let query = "SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1;";
//let count: usize = tx.query_row(query, params![e.kind, pubkey_blob], |row| row.get(0))?;
//info!("found {} rows that /would/ be preserved", count);
match tx.execute(
"DELETE FROM event WHERE kind=? and author=? and id NOT IN (SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1);",
params![e.kind, pubkey_blob, e.kind, pubkey_blob],
) {
Ok(_) => {},
Err(x) => {info!("error deleting replaceable event: {:?}",x);}
}
}
Ok(ins_count)
}
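
The BLOB-or-text rule above can be isolated; a small sketch mirroring the even-length lowercase-hex test that `is_lower_hex` performs (function name here is illustrative):

```rust
/// A tag value is stored as a BLOB only when it round-trips losslessly,
/// i.e. it is lowercase hex of even length; anything else is stored as text.
fn stores_as_blob(tagval: &str) -> bool {
    let lower_hex = tagval
        .chars()
        .all(|c| c.is_ascii_digit() || ('a'..='f').contains(&c));
    tagval.len() % 2 == 0 && lower_hex
}

fn main() {
    assert!(stores_as_blob("abcd12")); // even-length lowercase hex -> BLOB
    assert!(!stores_as_blob("abc"));   // odd length -> text
    assert!(!stores_as_blob("ABCD"));  // uppercase -> text
}
```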

20
src/cli.rs Normal file

@@ -0,0 +1,20 @@
use clap::Parser;
#[derive(Parser)]
#[command(about = "A nostr relay written in Rust", author = env!("CARGO_PKG_AUTHORS"), version = env!("CARGO_PKG_VERSION"))]
pub struct CLIArgs {
#[arg(
short,
long,
help = "Use the <directory> as the location of the database",
required = false
)]
pub db: Option<String>,
#[arg(
short,
long,
help = "Use the <file name> as the location of the config file",
required = false
)]
pub config: Option<String>,
}
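
A sketch of how these flags are consumed, assuming a `main` that wires them into `Settings::new` from `src/config.rs` (shown later in this diff):

```rust
// illustrative wiring, not the relay's actual main()
use clap::Parser;
use nostr_rs_relay::cli::CLIArgs;
use nostr_rs_relay::config;

fn main() {
    let args = CLIArgs::parse();
    // the optional --config path feeds Settings::new(&Option<String>)
    let settings = config::Settings::new(&args.config).expect("could not load settings");
    // --db, when present, overrides the configured database directory
    if let Some(db) = args.db {
        println!("database directory override: {db}");
    }
    println!("relay will listen on port {}", settings.network.port);
}
```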

@@ -1,9 +1,11 @@
//! Subscription close request parsing
//!
//! Representation and parsing of `CLOSE` messages sent from clients.
use crate::error::{Error, Result};
use serde::{Deserialize, Serialize};
/// Close command in network format
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct CloseCmd {
/// Protocol command, expected to always be "CLOSE".
cmd: String,
@@ -11,8 +13,8 @@ pub struct CloseCmd {
id: String,
}
/// Close command parsed
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
/// Identifier of the subscription to be closed.
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct Close {
/// The subscription identifier being closed.
pub id: String,
@@ -21,10 +23,10 @@ pub struct Close {
impl From<CloseCmd> for Result<Close> {
fn from(cc: CloseCmd) -> Result<Close> {
// ensure command is correct
if cc.cmd != "CLOSE" {
Err(Error::CommandUnknownError)
} else {
if cc.cmd == "CLOSE" {
Ok(Close { id: cc.id })
} else {
Err(Error::CommandUnknownError)
}
}
}
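
A usage sketch for the conversion above; since the fields are module-private, this would live inside `src/close.rs` (e.g. as a test):

```rust
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn close_cmd_roundtrip() {
        // a well-formed CLOSE command parses into a Close
        let cmd = CloseCmd {
            cmd: "CLOSE".to_owned(),
            id: "sub-1".to_owned(),
        };
        let parsed: Result<Close> = cmd.into();
        assert_eq!(parsed.unwrap().id, "sub-1");

        // any other command string is rejected
        let bad = CloseCmd {
            cmd: "NOPE".to_owned(),
            id: "sub-1".to_owned(),
        };
        let parsed: Result<Close> = bad.into();
        assert!(parsed.is_err());
    }
}
```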

@@ -1,12 +1,8 @@
use lazy_static::lazy_static;
use log::*;
//! Configuration file and settings management
use crate::payment::Processor;
use config::{Config, ConfigError, File};
use serde::{Deserialize, Serialize};
use std::sync::RwLock;
// initialize a singleton default configuration
lazy_static! {
pub static ref SETTINGS: RwLock<Settings> = RwLock::new(Settings::default());
}
use std::time::Duration;
#[derive(Debug, Serialize, Deserialize, Clone)]
#[allow(unused)]
@@ -16,31 +12,46 @@ pub struct Info {
pub description: Option<String>,
pub pubkey: Option<String>,
pub contact: Option<String>,
pub favicon: Option<String>,
pub relay_icon: Option<String>,
pub relay_page: Option<String>,
}
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Database {
pub data_directory: String,
pub engine: String,
pub in_memory: bool,
pub min_conn: u32,
pub max_conn: u32,
pub connection: String,
pub connection_write: Option<String>,
}
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Grpc {
pub event_admission_server: Option<String>,
pub restricts_write: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Network {
pub port: u16,
pub address: String,
pub remote_ip_header: Option<String>, // retrieve client IP from this HTTP header if present
pub ping_interval_seconds: u32,
}
//
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Options {
pub reject_future_seconds: Option<usize>, // if defined, reject any events with a timestamp more than X seconds in the future
}
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Retention {
// TODO: implement
@@ -50,58 +61,216 @@ pub struct Retention {
pub whitelist_addresses: Option<Vec<String>>, // whitelisted addresses (never delete)
}
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Limits {
pub messages_per_sec: Option<u32>, // Artificially slow down event writing to limit disk consumption (averaged over 1 minute)
pub subscriptions_per_min: Option<u32>, // Artificially slow down request (db query) creation to prevent abuse (averaged over 1 minute)
pub db_conns_per_client: Option<u32>, // How many concurrent database queries (not subscriptions) may a client have?
pub max_blocking_threads: usize,
pub max_event_bytes: Option<usize>, // Maximum size of an EVENT message
pub max_ws_message_bytes: Option<usize>,
pub max_ws_frame_bytes: Option<usize>,
pub broadcast_buffer: usize, // events to buffer for subscribers (prevents slow readers from consuming memory)
pub event_persist_buffer: usize, // events to buffer for database commits (block senders if database writes are too slow)
pub event_kind_blacklist: Option<Vec<u64>>,
pub event_kind_allowlist: Option<Vec<u64>>,
pub limit_scrapers: bool,
}
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Authorization {
pub pubkey_whitelist: Option<Vec<String>>, // If present, only allow these pubkeys to publish events
pub nip42_auth: bool, // if true enables NIP-42 authentication
pub nip42_dms: bool, // if true send DMs only to their authenticated recipients
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct PayToRelay {
pub enabled: bool,
pub admission_cost: u64, // Cost to have pubkey whitelisted
pub cost_per_event: u64, // Cost author to pay per event
pub node_url: String,
pub api_secret: String,
pub terms_message: String,
pub sign_ups: bool, // allow new users to sign up to relay
pub direct_message: bool, // Send direct message to user with invoice and terms
pub secret_key: Option<String>,
pub processor: Processor,
pub rune_path: Option<String>, // To access clightning API
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Diagnostics {
pub tracing: bool, // enables tokio console-subscriber
}
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Copy)]
#[serde(rename_all = "lowercase")]
pub enum VerifiedUsersMode {
Enabled,
Passive,
Disabled,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct VerifiedUsers {
pub mode: VerifiedUsersMode, // Mode of operation: "enabled" (enforce) or "passive" (check only). If none, this is simply disabled.
pub domain_whitelist: Option<Vec<String>>, // If present, only allow verified users from these domains can publish events
pub domain_blacklist: Option<Vec<String>>, // If present, allow all verified users from any domain except these
pub verify_expiration: Option<String>, // how long a verification is cached for before no longer being used
pub verify_update_frequency: Option<String>, // how often to attempt to update verification
pub verify_expiration_duration: Option<Duration>, // internal result of parsing verify_expiration
pub verify_update_frequency_duration: Option<Duration>, // internal result of parsing verify_update_frequency
pub max_consecutive_failures: usize, // maximum number of verification failures in a row, before ceasing future checks
}
impl VerifiedUsers {
pub fn init(&mut self) {
self.verify_expiration_duration = self.verify_expiration_duration();
self.verify_update_frequency_duration = self.verify_update_duration();
}
#[must_use]
pub fn is_enabled(&self) -> bool {
self.mode == VerifiedUsersMode::Enabled
}
#[must_use]
pub fn is_active(&self) -> bool {
self.mode == VerifiedUsersMode::Enabled || self.mode == VerifiedUsersMode::Passive
}
#[must_use]
pub fn is_passive(&self) -> bool {
self.mode == VerifiedUsersMode::Passive
}
#[must_use]
pub fn verify_expiration_duration(&self) -> Option<Duration> {
self.verify_expiration
.as_ref()
.and_then(|x| parse_duration::parse(x).ok())
}
#[must_use]
pub fn verify_update_duration(&self) -> Option<Duration> {
self.verify_update_frequency
.as_ref()
.and_then(|x| parse_duration::parse(x).ok())
}
#[must_use]
pub fn is_valid(&self) -> bool {
self.verify_expiration_duration().is_some() && self.verify_update_duration().is_some()
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Logging {
pub folder_path: Option<String>,
pub file_prefix: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Settings {
pub info: Info,
pub diagnostics: Diagnostics,
pub database: Database,
pub grpc: Grpc,
pub network: Network,
pub limits: Limits,
pub authorization: Authorization,
pub pay_to_relay: PayToRelay,
pub verified_users: VerifiedUsers,
pub retention: Retention,
pub options: Options,
pub logging: Logging,
}
impl Settings {
pub fn new() -> Self {
let d = Self::default();
pub fn new(config_file_name: &Option<String>) -> Result<Self, ConfigError> {
let default_settings = Self::default();
// attempt to construct settings with file
// Self::new_from_default(&d).unwrap_or(d)
let from_file = Self::new_from_default(&d);
let from_file = Self::new_from_default(&default_settings, config_file_name);
match from_file {
Ok(f) => f,
Err(e) => {
warn!("Error reading config file ({:?})", e);
d
// pass up the parse error if the config file was specified,
// otherwise use the default config (with a warning).
if config_file_name.is_some() {
Err(e)
} else {
eprintln!("Error reading config file ({:?})", e);
eprintln!("WARNING: Default configuration settings will be used");
Ok(default_settings)
}
}
ok => ok,
}
}
fn new_from_default(default: &Settings) -> Result<Self, config::ConfigError> {
let config: config::Config = config::Config::new();
let settings: Settings = config
fn new_from_default(
default: &Settings,
config_file_name: &Option<String>,
) -> Result<Self, ConfigError> {
let default_config_file_name = "config.toml".to_string();
let config: &String = match config_file_name {
Some(value) => value,
None => &default_config_file_name,
};
let builder = Config::builder();
let config: Config = builder
// use defaults
.with_merged(config::Config::try_from(default).unwrap())?
.add_source(Config::try_from(default)?)
// override with file contents
.with_merged(config::File::with_name("config"))?
.try_into()?;
.add_source(File::with_name(config))
.build()?;
let mut settings: Settings = config.try_deserialize()?;
// ensure connection pool size is logical
if settings.database.min_conn > settings.database.max_conn {
panic!(
"Database min_conn setting ({}) cannot exceed max_conn ({})",
settings.database.min_conn, settings.database.max_conn
);
assert!(
settings.database.min_conn <= settings.database.max_conn,
"Database min_conn setting ({}) cannot exceed max_conn ({})",
settings.database.min_conn,
settings.database.max_conn
);
// ensure durations parse
assert!(
settings.verified_users.is_valid(),
"VerifiedUsers time settings could not be parsed"
);
// initialize durations for verified users
settings.verified_users.init();
// Validate pay to relay settings
if settings.pay_to_relay.enabled {
if settings.pay_to_relay.processor == Processor::ClnRest {
assert!(settings
.pay_to_relay
.rune_path
.as_ref()
.is_some_and(|path| path != "<rune path>"));
} else if settings.pay_to_relay.processor == Processor::LNBits {
assert_ne!(settings.pay_to_relay.api_secret, "");
}
// Should check that url is valid
assert_ne!(settings.pay_to_relay.node_url, "");
assert_ne!(settings.pay_to_relay.terms_message, "");
if settings.pay_to_relay.direct_message {
assert!(settings
.pay_to_relay
.secret_key
.as_ref()
.is_some_and(|key| key != "<nostr nsec>"));
}
}
Ok(settings)
}
}
@@ -115,23 +284,71 @@ impl Default for Settings {
description: None,
pubkey: None,
contact: None,
favicon: None,
relay_icon: None,
relay_page: None,
},
diagnostics: Diagnostics { tracing: false },
database: Database {
data_directory: ".".to_owned(),
engine: "sqlite".to_owned(),
in_memory: false,
min_conn: 4,
max_conn: 128,
max_conn: 8,
connection: "".to_owned(),
connection_write: None,
},
grpc: Grpc {
event_admission_server: None,
restricts_write: false,
},
network: Network {
port: 8080,
ping_interval_seconds: 300,
address: "0.0.0.0".to_owned(),
remote_ip_header: None,
},
limits: Limits {
messages_per_sec: None,
subscriptions_per_min: None,
db_conns_per_client: None,
max_blocking_threads: 16,
max_event_bytes: Some(2 << 17), // 128K
max_ws_message_bytes: Some(2 << 17), // 128K
max_ws_frame_bytes: Some(2 << 17), // 128K
broadcast_buffer: 4096,
event_persist_buffer: 16,
broadcast_buffer: 16384,
event_persist_buffer: 4096,
event_kind_blacklist: None,
event_kind_allowlist: None,
limit_scrapers: false,
},
authorization: Authorization {
pubkey_whitelist: None, // Allow any address to publish
nip42_auth: false, // Disable NIP-42 authentication
nip42_dms: false, // Send DMs to everybody
},
pay_to_relay: PayToRelay {
enabled: false,
admission_cost: 4200,
cost_per_event: 0,
terms_message: "".to_string(),
node_url: "".to_string(),
api_secret: "".to_string(),
rune_path: None,
sign_ups: false,
direct_message: false,
secret_key: None,
processor: Processor::LNBits,
},
verified_users: VerifiedUsers {
mode: VerifiedUsersMode::Disabled,
domain_whitelist: None,
domain_blacklist: None,
verify_expiration: Some("1 week".to_owned()),
verify_update_frequency: Some("1 day".to_owned()),
verify_expiration_duration: None,
verify_update_frequency_duration: None,
max_consecutive_failures: 20,
},
retention: Retention {
max_events: None, // max events
@@ -140,7 +357,11 @@ impl Default for Settings {
whitelist_addresses: None, // whitelisted addresses (never delete)
},
options: Options {
reject_future_seconds: Some(30 * 60), // Reject events 30min in the future or greater
reject_future_seconds: None, // Reject events in the future if defined
},
logging: Logging {
folder_path: None,
file_prefix: None,
},
}
}
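
The `verify_expiration`/`verify_update_frequency` defaults above only pass the `is_valid()` assertion because they parse as durations; a quick sketch of that check using the `parse_duration` crate referenced earlier:

```rust
fn main() {
    // mirrors VerifiedUsers::is_valid(): both strings must parse
    let expiration = parse_duration::parse("1 week");
    let frequency = parse_duration::parse("1 day");
    assert!(expiration.is_ok() && frequency.is_ok());
    println!("expiration={expiration:?}, frequency={frequency:?}");
}
```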

@@ -1,69 +1,115 @@
//! Client connection state
use std::collections::HashMap;
use tracing::{debug, trace};
use uuid::Uuid;
use crate::close::Close;
use crate::conn::Nip42AuthState::{AuthPubkey, Challenge, NoAuth};
use crate::error::Error;
use crate::error::Result;
use crate::event::Event;
use crate::subscription::Subscription;
use log::*;
use std::collections::HashMap;
use uuid::Uuid;
use crate::utils::{host_str, unix_time};
/// A subscription identifier has a maximum length
const MAX_SUBSCRIPTION_ID_LEN: usize = 256;
/// NIP-42 authentication state
pub enum Nip42AuthState {
/// The client is not authenticated yet
NoAuth,
/// The AUTH challenge sent
Challenge(String),
/// The client is authenticated
AuthPubkey(String),
}
/// State for a client connection
pub struct ClientConn {
/// Client IP (either from socket, or configured proxy header)
client_ip_addr: String,
/// Unique client identifier generated at connection time
client_id: Uuid,
/// The current set of active client subscriptions
subscriptions: HashMap<String, Subscription>,
/// Per-connection maximum concurrent subscriptions
max_subs: usize,
/// NIP-42 AUTH
auth: Nip42AuthState,
}
impl Default for ClientConn {
fn default() -> Self {
Self::new()
Self::new("unknown".to_owned())
}
}
impl ClientConn {
/// Create a new, empty connection state.
pub fn new() -> Self {
#[must_use]
pub fn new(client_ip_addr: String) -> Self {
let client_id = Uuid::new_v4();
ClientConn {
client_ip_addr,
client_id,
subscriptions: HashMap::new(),
max_subs: 32,
auth: NoAuth,
}
}
#[must_use]
pub fn subscriptions(&self) -> &HashMap<String, Subscription> {
&self.subscriptions
}
/// Check if the given subscription already exists
#[must_use]
pub fn has_subscription(&self, sub: &Subscription) -> bool {
self.subscriptions.values().any(|x| x == sub)
}
/// Get a short prefix of the client's unique identifier, suitable
/// for logging.
#[must_use]
pub fn get_client_prefix(&self) -> String {
self.client_id.to_string().chars().take(8).collect()
}
/// Find all matching subscriptions.
pub fn get_matching_subscriptions(&self, e: &Event) -> Vec<&str> {
let mut v: Vec<&str> = vec![];
for (id, sub) in self.subscriptions.iter() {
if sub.interested_in_event(e) {
v.push(id);
}
#[must_use]
pub fn ip(&self) -> &str {
&self.client_ip_addr
}
#[must_use]
pub fn auth_pubkey(&self) -> Option<&String> {
match &self.auth {
AuthPubkey(pubkey) => Some(pubkey),
_ => None,
}
}
#[must_use]
pub fn auth_challenge(&self) -> Option<&String> {
match &self.auth {
Challenge(challenge) => Some(challenge),
_ => None,
}
v
}
/// Add a new subscription for this connection.
/// # Errors
///
/// Will return `Err` if the client has too many subscriptions, or
/// if the provided name is excessively long.
pub fn subscribe(&mut self, s: Subscription) -> Result<()> {
let k = s.get_id();
let sub_id_len = k.len();
// prevent arbitrarily long subscription identifiers from
// being used.
if sub_id_len > MAX_SUBSCRIPTION_ID_LEN {
info!(
debug!(
"ignoring sub request with excessive length: ({})",
sub_id_len
);
@@ -72,8 +118,12 @@ impl ClientConn {
// check if an existing subscription exists, and replace if so
if self.subscriptions.contains_key(&k) {
self.subscriptions.remove(&k);
self.subscriptions.insert(k, s);
debug!("replaced existing subscription");
self.subscriptions.insert(k, s.clone());
trace!(
"replaced existing subscription (cid: {}, sub: {:?})",
self.get_client_prefix(),
s.get_id()
);
return Ok(());
}
@@ -83,20 +133,97 @@ impl ClientConn {
}
// add subscription
self.subscriptions.insert(k, s);
debug!(
"registered new subscription, currently have {} active subs",
self.subscriptions.len()
trace!(
"registered new subscription, currently have {} active subs (cid: {})",
self.subscriptions.len(),
self.get_client_prefix(),
);
Ok(())
}
/// Remove the subscription for this connection.
pub fn unsubscribe(&mut self, c: Close) {
pub fn unsubscribe(&mut self, c: &Close) {
// TODO: return notice if subscription did not exist.
self.subscriptions.remove(&c.id);
debug!(
"removed subscription, currently have {} active subs",
self.subscriptions.len()
trace!(
"removed subscription, currently have {} active subs (cid: {})",
self.subscriptions.len(),
self.get_client_prefix(),
);
}
pub fn generate_auth_challenge(&mut self) {
self.auth = Challenge(Uuid::new_v4().to_string());
}
pub fn authenticate(&mut self, event: &Event, relay_url: &str) -> Result<()> {
match &self.auth {
Challenge(_) => (),
AuthPubkey(_) => {
// already authenticated
return Ok(());
}
NoAuth => {
// unexpected AUTH request
return Err(Error::AuthFailure);
}
}
match event.validate() {
Ok(_) => {
if event.kind != 22242 {
return Err(Error::AuthFailure);
}
let curr_time = unix_time();
let past_cutoff = curr_time - 600; // 10 minutes
let future_cutoff = curr_time + 600; // 10 minutes
if event.created_at < past_cutoff || event.created_at > future_cutoff {
return Err(Error::AuthFailure);
}
let mut challenge: Option<&str> = None;
let mut relay: Option<&str> = None;
for tag in &event.tags {
if tag.len() == 2 && tag.first() == Some(&"challenge".into()) {
challenge = tag.get(1).map(|x| x.as_str());
}
if tag.len() == 2 && tag.first() == Some(&"relay".into()) {
relay = tag.get(1).map(|x| x.as_str());
}
}
match (challenge, &self.auth) {
(Some(received_challenge), Challenge(sent_challenge)) => {
if received_challenge != sent_challenge {
return Err(Error::AuthFailure);
}
}
(_, _) => {
return Err(Error::AuthFailure);
}
}
match (relay.and_then(host_str), host_str(relay_url)) {
(Some(received_relay), Some(our_relay)) => {
if received_relay != our_relay {
return Err(Error::AuthFailure);
}
}
(_, _) => {
return Err(Error::AuthFailure);
}
}
self.auth = AuthPubkey(event.pubkey.clone());
trace!(
"authenticated pubkey {} (cid: {})",
event.pubkey.chars().take(8).collect::<String>(),
self.get_client_prefix()
);
Ok(())
}
Err(_) => Err(Error::AuthFailure),
}
}
}
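
For reference, a sketch of the AUTH event shape `authenticate` accepts: kind `22242`, a `created_at` within ten minutes of the relay clock, and `challenge`/`relay` tags echoing what the relay issued. All values below are placeholders and would fail signature validation:

```rust
// placeholder NIP-42 AUTH event; id/pubkey/sig must be genuine for
// event.validate() to succeed inside authenticate()
let auth_event = Event {
    id: "<event id hex>".to_owned(),
    pubkey: "<client pubkey hex>".to_owned(),
    delegated_by: None,
    created_at: unix_time(), // must fall within +/- 600s of now
    kind: 22242,
    tags: vec![
        vec!["challenge".to_owned(), "<challenge sent by relay>".to_owned()],
        vec!["relay".to_owned(), "wss://relay.example.com/".to_owned()],
    ],
    content: String::new(),
    sig: "<schnorr signature hex>".to_owned(),
    tagidx: None,
};
```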

1149
src/db.rs

File diff suppressed because it is too large

406
src/delegation.rs Normal file

@@ -0,0 +1,406 @@
//! Event parsing and validation
use crate::error::Error;
use crate::error::Result;
use crate::event::Event;
use bitcoin_hashes::{sha256, Hash};
use lazy_static::lazy_static;
use regex::Regex;
use secp256k1::{schnorr, Secp256k1, VerifyOnly, XOnlyPublicKey};
use serde::{Deserialize, Serialize};
use std::str::FromStr;
use tracing::{debug, info};
// This handles everything related to delegation, in particular the
// condition/rune parsing and logic.
// Conditions are poorly specified, so we will implement the minimum
// necessary for now.
// fields MUST be either "kind" or "created_at".
// operators supported are ">", "<", "=", "!".
// no operations on 'content' are supported.
// this allows constraints for:
// valid date ranges (valid from X->Y dates).
// specific kinds (publish kind=1,5)
// kind ranges (publish ephemeral events, kind>19999&kind<30001)
// for more complex scenarios (allow delegatee to publish ephemeral
// AND replacement events), it may be necessary to generate and use
// different condition strings, since we do not support grouping or
// "OR" logic.
lazy_static! {
/// Secp256k1 verification instance.
pub static ref SECP: Secp256k1<VerifyOnly> = Secp256k1::verification_only();
}
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub enum Field {
Kind,
CreatedAt,
}
impl FromStr for Field {
type Err = Error;
fn from_str(value: &str) -> Result<Self, Self::Err> {
if value == "kind" {
Ok(Field::Kind)
} else if value == "created_at" {
Ok(Field::CreatedAt)
} else {
Err(Error::DelegationParseError)
}
}
}
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub enum Operator {
LessThan,
GreaterThan,
Equals,
NotEquals,
}
impl FromStr for Operator {
type Err = Error;
fn from_str(value: &str) -> Result<Self, Self::Err> {
if value == "<" {
Ok(Operator::LessThan)
} else if value == ">" {
Ok(Operator::GreaterThan)
} else if value == "=" {
Ok(Operator::Equals)
} else if value == "!" {
Ok(Operator::NotEquals)
} else {
Err(Error::DelegationParseError)
}
}
}
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct ConditionQuery {
pub conditions: Vec<Condition>,
}
impl ConditionQuery {
#[must_use]
pub fn allows_event(&self, event: &Event) -> bool {
// check each condition, to ensure that the event complies
// with the restriction.
for c in &self.conditions {
if !c.allows_event(event) {
// any failing conditions invalidates the delegation
// on this event
return false;
}
}
// delegation was permitted unconditionally, or all conditions
// were true
true
}
}
// Verify that the delegator approved the delegation; return a ConditionQuery if so.
#[must_use]
pub fn validate_delegation(
delegator: &str,
delegatee: &str,
cond_query: &str,
sigstr: &str,
) -> Option<ConditionQuery> {
// form the token
let tok = format!("nostr:delegation:{delegatee}:{cond_query}");
// form SHA256 hash
let digest: sha256::Hash = sha256::Hash::hash(tok.as_bytes());
// reject malformed signatures instead of panicking on client input
let sig = match schnorr::Signature::from_str(sigstr) {
Ok(s) => s,
Err(_) => {
debug!("client sent malformed delegation signature");
return None;
}
};
if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) {
if let Ok(pubkey) = XOnlyPublicKey::from_str(delegator) {
let verify = SECP.verify_schnorr(&sig, &msg, &pubkey);
if verify.is_ok() {
// return the parsed condition query
cond_query.parse::<ConditionQuery>().ok()
} else {
debug!("client sent an delegation signature that did not validate");
None
}
} else {
debug!("client sent malformed delegation pubkey");
None
}
} else {
info!("error converting delegation digest to secp256k1 message");
None
}
}
/// Parsed delegation condition
/// see <https://github.com/nostr-protocol/nips/pull/28#pullrequestreview-1084903800>
/// An example complex condition would be: `kind=1,2,3&created_at<1665265999`
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct Condition {
pub field: Field,
pub operator: Operator,
pub values: Vec<u64>,
}
impl Condition {
/// Check if this condition allows the given event to be delegated
#[must_use]
pub fn allows_event(&self, event: &Event) -> bool {
// determine what the right-hand side of the operator is
let resolved_field = match &self.field {
Field::Kind => event.kind,
Field::CreatedAt => event.created_at,
};
match &self.operator {
Operator::LessThan => {
// the less-than operator is only valid for single values.
if self.values.len() == 1 {
if let Some(v) = self.values.first() {
return resolved_field < *v;
}
}
}
Operator::GreaterThan => {
// the greater-than operator is only valid for single values.
if self.values.len() == 1 {
if let Some(v) = self.values.first() {
return resolved_field > *v;
}
}
}
Operator::Equals => {
// equals is interpreted as "must be equal to at least one provided value"
return self.values.iter().any(|&x| resolved_field == x);
}
Operator::NotEquals => {
// not-equals is interpreted as "must not be equal to any provided value"
// this is the one case where an empty list of values could be allowed, even though it is a pointless restriction.
return self.values.iter().all(|&x| resolved_field != x);
}
}
false
}
}
fn str_to_condition(cs: &str) -> Option<Condition> {
// a condition is a string (alphanum+underscore), an operator (<>=!), and values (num+comma)
lazy_static! {
static ref RE: Regex = Regex::new("([[:word:]]+)([<>=!]+)([,[[:digit:]]]*)").unwrap();
}
// match against the regex
let caps = RE.captures(cs)?;
let field = caps.get(1)?.as_str().parse::<Field>().ok()?;
let operator = caps.get(2)?.as_str().parse::<Operator>().ok()?;
// values are just comma separated numbers, but all must be parsed
let rawvals = caps.get(3)?.as_str();
let values = rawvals
.split_terminator(',')
.map(|n| n.parse::<u64>().ok())
.collect::<Option<Vec<_>>>()?;
// convert field string into Field
Some(Condition {
field,
operator,
values,
})
}
/// Parse a condition query from a string slice
impl FromStr for ConditionQuery {
type Err = Error;
fn from_str(value: &str) -> Result<Self, Self::Err> {
// split the string with '&'
let mut conditions = vec![];
let condstrs = value.split_terminator('&');
// parse each individual condition
for c in condstrs {
conditions.push(str_to_condition(c).ok_or(Error::DelegationParseError)?);
}
Ok(ConditionQuery { conditions })
}
}
#[cfg(test)]
mod tests {
use super::*;
// parse condition strings
#[test]
fn parse_empty() -> Result<()> {
// given an empty condition query, produce an empty vector
let empty_cq = ConditionQuery { conditions: vec![] };
let parsed = "".parse::<ConditionQuery>()?;
assert_eq!(parsed, empty_cq);
Ok(())
}
// parse field 'kind'
#[test]
fn test_kind_field_parse() -> Result<()> {
let field = "kind".parse::<Field>()?;
assert_eq!(field, Field::Kind);
Ok(())
}
// parse field 'created_at'
#[test]
fn test_created_at_field_parse() -> Result<()> {
let field = "created_at".parse::<Field>()?;
assert_eq!(field, Field::CreatedAt);
Ok(())
}
// parse unknown field
#[test]
fn unknown_field_parse() {
let field = "unk".parse::<Field>();
assert!(field.is_err());
}
// parse a full conditional query with an empty array
#[test]
fn parse_kind_equals_empty() -> Result<()> {
// given an empty condition query, produce an empty vector
let kind_cq = ConditionQuery {
conditions: vec![Condition {
field: Field::Kind,
operator: Operator::Equals,
values: vec![],
}],
};
let parsed = "kind=".parse::<ConditionQuery>()?;
assert_eq!(parsed, kind_cq);
Ok(())
}
// parse a full conditional query with a single value
#[test]
fn parse_kind_equals_singleval() -> Result<()> {
// given an empty condition query, produce an empty vector
let kind_cq = ConditionQuery {
conditions: vec![Condition {
field: Field::Kind,
operator: Operator::Equals,
values: vec![1],
}],
};
let parsed = "kind=1".parse::<ConditionQuery>()?;
assert_eq!(parsed, kind_cq);
Ok(())
}
// parse a full conditional query with multiple values
#[test]
fn parse_kind_equals_multival() -> Result<()> {
// given an empty condition query, produce an empty vector
let kind_cq = ConditionQuery {
conditions: vec![Condition {
field: Field::Kind,
operator: Operator::Equals,
values: vec![1, 2, 4],
}],
};
let parsed = "kind=1,2,4".parse::<ConditionQuery>()?;
assert_eq!(parsed, kind_cq);
Ok(())
}
// parse multiple conditions
#[test]
fn parse_multi_conditions() -> Result<()> {
// given an empty condition query, produce an empty vector
let cq = ConditionQuery {
conditions: vec![
Condition {
field: Field::Kind,
operator: Operator::GreaterThan,
values: vec![10000],
},
Condition {
field: Field::Kind,
operator: Operator::LessThan,
values: vec![20000],
},
Condition {
field: Field::Kind,
operator: Operator::NotEquals,
values: vec![10001],
},
Condition {
field: Field::CreatedAt,
operator: Operator::LessThan,
values: vec![1_665_867_123],
},
],
};
let parsed =
"kind>10000&kind<20000&kind!10001&created_at<1665867123".parse::<ConditionQuery>()?;
assert_eq!(parsed, cq);
Ok(())
}
// Check for condition logic on event w/ empty values
#[test]
fn condition_with_empty_values() {
let mut c = Condition {
field: Field::Kind,
operator: Operator::GreaterThan,
values: vec![],
};
let e = Event::simple_event();
assert!(!c.allows_event(&e));
c.operator = Operator::LessThan;
assert!(!c.allows_event(&e));
c.operator = Operator::Equals;
assert!(!c.allows_event(&e));
// Not Equals applied to an empty list *is* allowed
// (pointless, but logically valid).
c.operator = Operator::NotEquals;
assert!(c.allows_event(&e));
}
// Check for condition logic on event w/ single value
#[test]
fn condition_kind_gt_event_single() {
let c = Condition {
field: Field::Kind,
operator: Operator::GreaterThan,
values: vec![10],
};
let mut e = Event::simple_event();
// kind is not greater than 10, not allowed
e.kind = 1;
assert!(!c.allows_event(&e));
// kind is greater than 10, allowed
e.kind = 100;
assert!(c.allows_event(&e));
// kind is 10, not allowed
e.kind = 10;
assert!(!c.allows_event(&e));
}
// Check for condition logic on event w/ multi values
#[test]
fn condition_with_multi_values() {
let mut c = Condition {
field: Field::Kind,
operator: Operator::Equals,
values: vec![0, 10, 20],
};
let mut e = Event::simple_event();
// Allow if event kind is in list for Equals
e.kind = 10;
assert!(c.allows_event(&e));
// Deny if event kind is not in list for Equals
e.kind = 11;
assert!(!c.allows_event(&e));
// Deny if event kind is in list for NotEquals
e.kind = 10;
c.operator = Operator::NotEquals;
assert!(!c.allows_event(&e));
// Allow if event kind is not in list for NotEquals
e.kind = 99;
c.operator = Operator::NotEquals;
assert!(c.allows_event(&e));
// Always deny if GreaterThan/LessThan for a list
c.operator = Operator::LessThan;
assert!(!c.allows_event(&e));
c.operator = Operator::GreaterThan;
assert!(!c.allows_event(&e));
}
}
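
Tying this back to events: a delegation arrives as a 4-element tag, and the signed token is `nostr:delegation:<delegatee>:<conditions>` as formed in `validate_delegation` above. A placeholder sketch of the tag layout that `Event::delegated_author` (later in this diff) consumes:

```rust
// placeholder values; the final element must be a valid schnorr signature
// by the delegator over sha256("nostr:delegation:<delegatee>:<conditions>")
let delegation_tag: Vec<String> = vec![
    "delegation".to_owned(),
    "<delegator pubkey hex>".to_owned(),       // who granted publishing rights
    "kind=1&created_at<1665867123".to_owned(), // the ConditionQuery string
    "<schnorr signature hex>".to_owned(),      // sig over the delegation token
];
assert_eq!(delegation_tag.len(), 4);
```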

@@ -17,10 +17,16 @@ pub enum Error {
ConnWriteError,
#[error("EVENT parse failed")]
EventParseFailed,
#[error("ClOSE message parse failed")]
#[error("CLOSE message parse failed")]
CloseParseFailed,
#[error("Event validation failed")]
EventInvalid,
#[error("Event invalid signature")]
EventInvalidSignature,
#[error("Event invalid id")]
EventInvalidId,
#[error("Event malformed pubkey")]
EventMalformedPubkey,
#[error("Event could not canonicalize")]
EventCouldNotCanonicalize,
#[error("Event too large")]
EventMaxLengthError(usize),
#[error("Subscription identifier max length exceeded")]
@@ -36,12 +42,79 @@ pub enum Error {
CommandUnknownError,
#[error("SQL error")]
SqlError(rusqlite::Error),
#[error("Config error")]
#[error("Config error : {0}")]
ConfigError(config::ConfigError),
#[error("Data directory does not exist")]
DatabaseDirError,
#[error("Database Connection Pool Error")]
DatabasePoolError(r2d2::Error),
#[error("SQL error")]
SqlxError(sqlx::Error),
#[error("Database Connection Pool Error")]
SqlxDatabasePoolError(sqlx::Error),
#[error("Custom Error : {0}")]
CustomError(String),
#[error("Task join error")]
JoinError,
#[error("Hyper Client error")]
HyperError(hyper::Error),
#[error("Hex encoding error")]
HexError(hex::FromHexError),
#[error("Delegation parse error")]
DelegationParseError,
#[error("Channel closed error")]
ChannelClosed,
#[error("Authz error")]
AuthzError,
#[error("Tonic GRPC error")]
TonicError(tonic::Status),
#[error("Invalid AUTH message")]
AuthFailure,
#[error("I/O Error")]
IoError(std::io::Error),
#[error("Event builder error")]
EventError(nostr::event::builder::Error),
#[error("Nostr key error")]
NostrKeyError(nostr::key::Error),
#[error("Payment hash mismatch")]
PaymentHash,
#[error("Error parsing url")]
URLParseError(url::ParseError),
#[error("HTTP error")]
HTTPError(http::Error),
#[error("Unknown/Undocumented")]
UnknownError,
}
//impl From<Box<dyn std::error::Error>> for Error {
// fn from(e: Box<dyn std::error::Error>) -> Self {
// Error::CustomError("error".to_owned())
// }
//}
impl From<hex::FromHexError> for Error {
fn from(h: hex::FromHexError) -> Self {
Error::HexError(h)
}
}
impl From<hyper::Error> for Error {
fn from(h: hyper::Error) -> Self {
Error::HyperError(h)
}
}
impl From<r2d2::Error> for Error {
fn from(d: r2d2::Error) -> Self {
Error::DatabasePoolError(d)
}
}
impl From<tokio::task::JoinError> for Error {
/// Wrap task join error
fn from(_j: tokio::task::JoinError) -> Self {
Error::JoinError
}
}
impl From<rusqlite::Error> for Error {
@@ -51,6 +124,12 @@ impl From<rusqlite::Error> for Error {
}
}
impl From<sqlx::Error> for Error {
fn from(d: sqlx::Error) -> Self {
Error::SqlxDatabasePoolError(d)
}
}
impl From<serde_json::Error> for Error {
/// Wrap JSON error
fn from(r: serde_json::Error) -> Self {
@@ -71,3 +150,43 @@ impl From<config::ConfigError> for Error {
Error::ConfigError(r)
}
}
impl From<tonic::Status> for Error {
/// Wrap Tonic GRPC error
fn from(r: tonic::Status) -> Self {
Error::TonicError(r)
}
}
impl From<std::io::Error> for Error {
fn from(r: std::io::Error) -> Self {
Error::IoError(r)
}
}
impl From<nostr::event::builder::Error> for Error {
/// Wrap event builder error
fn from(r: nostr::event::builder::Error) -> Self {
Error::EventError(r)
}
}
impl From<nostr::key::Error> for Error {
/// Wrap nostr key error
fn from(r: nostr::key::Error) -> Self {
Error::NostrKeyError(r)
}
}
impl From<url::ParseError> for Error {
/// Wrap URL parse error
fn from(r: url::ParseError) -> Self {
Error::URLParseError(r)
}
}
impl From<http::Error> for Error {
/// Wrap HTTP error
fn from(r: http::Error) -> Self {
Error::HTTPError(r)
}
}

@@ -1,10 +1,16 @@
//! Event parsing and validation
use crate::config;
use crate::error::Error::*;
use crate::delegation::validate_delegation;
use crate::error::Error::{
CommandUnknownError, EventCouldNotCanonicalize, EventInvalidId, EventInvalidSignature,
EventMalformedPubkey,
};
use crate::error::Result;
use crate::event::EventWrapper::WrappedAuth;
use crate::event::EventWrapper::WrappedEvent;
use crate::nip05;
use crate::utils::unix_time;
use bitcoin_hashes::{sha256, Hash};
use lazy_static::lazy_static;
use log::*;
use secp256k1::{schnorr, Secp256k1, VerifyOnly, XOnlyPublicKey};
use serde::{Deserialize, Deserializer, Serialize};
use serde_json::value::Value;
@@ -12,34 +18,44 @@ use serde_json::Number;
use std::collections::HashMap;
use std::collections::HashSet;
use std::str::FromStr;
use std::time::SystemTime;
use tracing::{debug, info};
lazy_static! {
/// Secp256k1 verification instance.
pub static ref SECP: Secp256k1<VerifyOnly> = Secp256k1::verification_only();
}
/// Event command in network format
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
/// Event command in network format.
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct EventCmd {
cmd: String, // expecting static "EVENT"
event: Event,
}
/// Event parsed
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
impl EventCmd {
#[must_use]
pub fn event_id(&self) -> &str {
&self.event.id
}
}
/// Parsed nostr event.
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct Event {
pub id: String,
pub(crate) pubkey: String,
pub(crate) created_at: u64,
pub(crate) kind: u64,
pub pubkey: String,
#[serde(skip)]
pub delegated_by: Option<String>,
pub created_at: u64,
pub kind: u64,
#[serde(deserialize_with = "tag_from_string")]
// NOTE: array-of-arrays may need to be more general than a string container
pub(crate) tags: Vec<Vec<String>>,
pub(crate) content: String,
pub(crate) sig: String,
// Optimization for tag search, built on demand
pub tags: Vec<Vec<String>>,
pub content: String,
pub sig: String,
// Optimization for tag search, built on demand.
#[serde(skip)]
pub(crate) tagidx: Option<HashMap<String, HashSet<String>>>,
pub tagidx: Option<HashMap<char, HashSet<String>>>,
}
/// Simple tag type for array of array of strings.
@@ -51,81 +67,274 @@ where
D: Deserializer<'de>,
{
let opt = Option::deserialize(deserializer)?;
Ok(opt.unwrap_or_else(Vec::new))
Ok(opt.unwrap_or_default())
}
/// Attempt to form a single-char tag name.
#[must_use]
pub fn single_char_tagname(tagname: &str) -> Option<char> {
// We return the tag character if and only if the tagname consists
// of a single char.
let mut tagnamechars = tagname.chars();
let firstchar = tagnamechars.next();
match firstchar {
Some(_) => {
// check second char
if tagnamechars.next().is_none() {
firstchar
} else {
None
}
}
None => None,
}
}
pub enum EventWrapper {
WrappedEvent(Event),
WrappedAuth(Event),
}
/// Convert network event to parsed/validated event.
impl From<EventCmd> for Result<Event> {
fn from(ec: EventCmd) -> Result<Event> {
impl From<EventCmd> for Result<EventWrapper> {
fn from(ec: EventCmd) -> Result<EventWrapper> {
// ensure command is correct
if ec.cmd != "EVENT" {
Err(CommandUnknownError)
} else if ec.event.is_valid() {
let mut e = ec.event;
e.build_index();
Ok(e)
if ec.cmd == "EVENT" {
ec.event.validate().map(|_| {
let mut e = ec.event;
e.build_index();
e.update_delegation();
WrappedEvent(e)
})
} else if ec.cmd == "AUTH" {
// we don't want to validate the event here, because NIP-42 can be disabled
// it will be validated later during the authentication process
Ok(WrappedAuth(ec.event))
} else {
Err(EventInvalid)
Err(CommandUnknownError)
}
}
}
/// Seconds since 1970
fn unix_time() -> u64 {
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.map(|x| x.as_secs())
.unwrap_or(0)
}
impl Event {
#[cfg(test)]
#[must_use]
pub fn simple_event() -> Event {
Event {
id: "0".to_owned(),
pubkey: "0".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: vec![],
content: "".to_owned(),
sig: "0".to_owned(),
tagidx: None,
}
}
#[must_use]
pub fn is_kind_metadata(&self) -> bool {
self.kind == 0
}
/// Should this event be persisted?
#[must_use]
pub fn is_ephemeral(&self) -> bool {
self.kind >= 20000 && self.kind < 30000
}
/// Is this event currently expired?
pub fn is_expired(&self) -> bool {
if let Some(exp) = self.expiration() {
exp <= unix_time()
} else {
false
}
}
/// Determine the time at which this event should expire
pub fn expiration(&self) -> Option<u64> {
let default = "".to_string();
let dvals: Vec<&String> = self
.tags
.iter()
.filter(|x| !x.is_empty())
.filter(|x| x.first().unwrap() == "expiration")
.map(|x| x.get(1).unwrap_or(&default))
.take(1)
.collect();
let val_first = dvals.first();
val_first.and_then(|t| t.parse::<u64>().ok())
}
/// Should this event be replaced with newer timestamps from same author?
#[must_use]
pub fn is_replaceable(&self) -> bool {
self.kind == 0
|| self.kind == 3
|| self.kind == 41
|| (self.kind >= 10000 && self.kind < 20000)
}
/// Should this event be replaced with newer timestamps from same author, for distinct `d` tag values?
#[must_use]
pub fn is_param_replaceable(&self) -> bool {
self.kind >= 30000 && self.kind < 40000
}
/// Get the distinct `d` tag value for a parameterized replaceable event, if any
#[must_use]
pub fn distinct_param(&self) -> Option<String> {
if self.is_param_replaceable() {
let default = "".to_string();
let dvals: Vec<&String> = self
.tags
.iter()
.filter(|x| !x.is_empty())
.filter(|x| x.first().unwrap() == "d")
.map(|x| x.get(1).unwrap_or(&default))
.take(1)
.collect();
let dval_first = dvals.first();
match dval_first {
Some(_) => dval_first.map(|x| x.to_string()),
None => Some(default),
}
} else {
None
}
}
/// Pull a NIP-05 Name out of the event, if one exists
#[must_use]
pub fn get_nip05_addr(&self) -> Option<nip05::Nip05Name> {
if self.is_kind_metadata() {
// very quick check if we should attempt to parse this json
if self.content.contains("\"nip05\"") {
// Parse into JSON
let md_parsed: Value = serde_json::from_str(&self.content).ok()?;
let md_map = md_parsed.as_object()?;
let nip05_str = md_map.get("nip05")?.as_str()?;
return nip05::Nip05Name::try_from(nip05_str).ok();
}
}
None
}
// is this event delegated (properly)?
// does the signature match, and are conditions valid?
// if so, return an alternate author for the event
#[must_use]
pub fn delegated_author(&self) -> Option<String> {
// is there a delegation tag?
let delegation_tag: Vec<String> = self
.tags
.iter()
.filter(|x| x.len() == 4)
.filter(|x| x.first().unwrap() == "delegation")
.take(1)
.next()?
.clone(); // get first tag
//let delegation_tag = self.tag_values_by_name("delegation");
// delegation tags should have exactly 3 elements after the name (pubkey, condition, sig)
// the event is signed by the delegatee
let delegatee = &self.pubkey;
// the delegation tag references the claimed delegator
let delegator: &str = delegation_tag.get(1)?;
let querystr: &str = delegation_tag.get(2)?;
let sig: &str = delegation_tag.get(3)?;
// attempt to get a condition query; this requires the delegation to have a valid signature.
if let Some(cond_query) = validate_delegation(delegator, delegatee, querystr, sig) {
// The signature was valid, now we ensure the delegation
// condition is valid for this event:
if cond_query.allows_event(self) {
// since this is allowed, we will provide the delegatee
Some(delegator.into())
} else {
debug!("an event failed to satisfy delegation conditions");
None
}
} else {
debug!("event had had invalid delegation signature");
None
}
}
/// Update delegation status
pub fn update_delegation(&mut self) {
self.delegated_by = self.delegated_author();
}
/// Build an event tag index
fn build_index(&mut self) {
pub fn build_index(&mut self) {
// if there are no tags; just leave the index as None
if self.tags.is_empty() {
return;
}
// otherwise, build an index
let mut idx: HashMap<String, HashSet<String>> = HashMap::new();
let mut idx: HashMap<char, HashSet<String>> = HashMap::new();
// iterate over tags that have at least 2 elements
for t in self.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.get(0).unwrap();
let tagname = t.first().unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
}
let tagnamechar = tagnamechar_opt.unwrap();
let tagval = t.get(1).unwrap();
// ensure a vector exists for this tag
if !idx.contains_key(tagname) {
idx.insert(tagname.clone(), HashSet::new());
}
idx.entry(tagnamechar).or_default();
// get the tag vec and insert entry
let tidx = idx.get_mut(tagname).expect("could not get tag vector");
tidx.insert(tagval.clone());
let idx_tag_vec = idx.get_mut(&tagnamechar).expect("could not get tag vector");
idx_tag_vec.insert(tagval.clone());
}
// save the tag structure
self.tagidx = Some(idx);
}
/// Create a short event identifier, suitable for logging.
#[must_use]
pub fn get_event_id_prefix(&self) -> String {
self.id.chars().take(8).collect()
}
#[must_use]
pub fn get_author_prefix(&self) -> String {
self.pubkey.chars().take(8).collect()
}
/// Check if this event has a valid signature.
fn is_valid(&self) -> bool {
// TODO: return a Result with a reason for invalid events
// don't bother to validate an event with a timestamp in the distant future.
let config = config::SETTINGS.read().unwrap();
let max_future_sec = config.options.reject_future_seconds;
if let Some(allowable_future) = max_future_sec {
/// Retrieve tag initial values across all tags matching the name
#[must_use]
pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
self.tags
.iter()
.filter(|x| x.len() > 1)
.filter(|x| x.first().unwrap() == tag_name)
.map(|x| x.get(1).unwrap().clone())
.collect()
}
#[must_use]
pub fn is_valid_timestamp(&self, reject_future_seconds: Option<usize>) -> bool {
if let Some(allowable_future) = reject_future_seconds {
let curr_time = unix_time();
// calculate difference, plus how far future we allow
if curr_time + (allowable_future as u64) < self.created_at {
let delta = self.created_at - curr_time;
debug!(
"Event is too far in the future ({} seconds), rejecting",
"event is too far in the future ({} seconds), rejecting",
delta
);
return false;
}
}
true
}
/// Check if this event has a valid signature.
pub fn validate(&self) -> Result<()> {
// TODO: return a Result with a reason for invalid events
// validation is performed by:
// * parsing JSON string into event fields
// * create an array:
@@ -133,39 +342,43 @@ impl Event {
// * serialize with no spaces/newlines
let c_opt = self.to_canonical();
if c_opt.is_none() {
info!("event could not be canonicalized");
return false;
debug!("could not canonicalize");
return Err(EventCouldNotCanonicalize);
}
let c = c_opt.unwrap();
// * compute the sha256sum.
let digest: sha256::Hash = sha256::Hash::hash(c.as_bytes());
let hex_digest = format!("{:x}", digest);
let hex_digest = format!("{digest:x}");
// * ensure the id matches the computed sha256sum.
if self.id != hex_digest {
return false;
debug!("event id does not match digest");
return Err(EventInvalidId);
}
// * validate the message digest (sig) using the pubkey & computed sha256 message hash.
let sig = schnorr::Signature::from_str(&self.sig).unwrap();
let sig = schnorr::Signature::from_str(&self.sig).map_err(|_| EventInvalidSignature)?;
if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) {
let pubkey = XOnlyPublicKey::from_str(&self.pubkey).unwrap();
let verify = SECP.verify_schnorr(&sig, &msg, &pubkey);
matches!(verify, Ok(()))
if let Ok(pubkey) = XOnlyPublicKey::from_str(&self.pubkey) {
SECP.verify_schnorr(&sig, &msg, &pubkey)
.map_err(|_| EventInvalidSignature)
} else {
debug!("client sent malformed pubkey");
Err(EventMalformedPubkey)
}
} else {
warn!("Error converting digest to secp256k1 message");
false
info!("error converting digest to secp256k1 message");
Err(EventInvalidSignature)
}
}
/// Convert event to canonical representation for signing.
fn to_canonical(&self) -> Option<String> {
pub fn to_canonical(&self) -> Option<String> {
// create a JsonValue for each event element
let mut c: Vec<Value> = vec![];
// id must be set to 0
let id = Number::from(0_u64);
c.push(serde_json::Value::Number(id));
// public key
c.push(Value::String(self.pubkey.clone()));
// creation time
let created_at = Number::from(self.created_at);
c.push(serde_json::Value::Number(created_at));
@@ -175,7 +388,7 @@ impl Event {
// tags
c.push(self.tags_to_canonical());
// content
c.push(Value::String(self.content.clone()));
serde_json::to_string(&Value::Array(c)).ok()
}
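Together, `validate` and `to_canonical` implement the NIP-01 identifier scheme: an event id must equal the SHA-256 digest of the compact-JSON array `[0, pubkey, created_at, kind, tags, content]`. A sketch of recomputing an id by hand, assuming the same sha256 API used in `validate` above:
let canonical = event.to_canonical().expect("event should canonicalize");
// e.g. [0,"012345",501234,1,[],"this is a test"]
let digest: sha256::Hash = sha256::Hash::hash(canonical.as_bytes());
assert_eq!(event.id, format!("{digest:x}"));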
@@ -183,11 +396,11 @@ impl Event {
fn tags_to_canonical(&self) -> Value {
let mut tags = Vec::<Value>::new();
// iterate over self tags,
for t in &self.tags {
// each tag is a vec of strings
let mut a = Vec::<Value>::new();
for v in t.iter() {
a.push(serde_json::Value::String(v.clone()));
}
tags.push(serde_json::Value::Array(a));
}
@@ -195,9 +408,11 @@ impl Event {
}
/// Determine if the given tag and value set intersect with tags in this event.
#[must_use]
pub fn generic_tag_val_intersect(&self, tagname: char, check: &HashSet<String>) -> bool {
match &self.tagidx {
// check if this is indexable tagname
Some(idx) => match idx.get(&tagname) {
Some(valset) => {
let common = valset.intersection(check);
common.count() > 0
@@ -209,65 +424,62 @@ impl Event {
}
}
impl From<nostr::Event> for Event {
fn from(nostr_event: nostr::Event) -> Self {
Event {
id: nostr_event.id.to_hex(),
pubkey: nostr_event.pubkey.to_string(),
created_at: nostr_event.created_at.as_u64(),
kind: nostr_event.kind.as_u64(),
tags: nostr_event.tags.iter().map(|x| x.as_vec()).collect(),
content: nostr_event.content,
sig: nostr_event.sig.to_string(),
delegated_by: None,
tagidx: None,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn event_creation() {
// create an event
let event = Event::simple_event();
assert_eq!(event.id, "0");
}
#[test]
fn event_serialize() -> Result<()> {
// serialize an event to JSON string
let event = Event::simple_event();
let j = serde_json::to_string(&event)?;
assert_eq!(j, "{\"id\":\"0\",\"pubkey\":\"0\",\"created_at\":0,\"kind\":0,\"tags\":[],\"content\":\"\",\"sig\":\"0\"}");
Ok(())
}
#[test]
fn empty_event_tag_match() {
let event = Event::simple_event();
assert!(!event
.generic_tag_val_intersect("e", &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
Ok(())
.generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
}
#[test]
fn single_event_tag_match() {
let mut event = Event::simple_event();
event.tags = vec![vec!["e".to_owned(), "foo".to_owned()]];
event.build_index();
assert!(event
.generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
}
#[test]
fn event_tags_serialize() -> Result<()> {
// serialize an event with tags to JSON string
let mut event = Event::simple_event();
event.tags = vec![
vec![
"e".to_owned(),
@@ -299,7 +511,8 @@ mod tests {
let e = Event {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501_234,
kind: 1,
tags: vec![],
content: "this is a test".to_owned(),
@@ -311,12 +524,67 @@ mod tests {
assert_eq!(c, expected);
}
#[test]
fn event_tag_select() {
let e = Event {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501_234,
kind: 1,
tags: vec![
vec!["j".to_owned(), "abc".to_owned()],
vec!["e".to_owned(), "foo".to_owned()],
vec!["e".to_owned(), "bar".to_owned()],
vec!["e".to_owned(), "baz".to_owned()],
vec![
"p".to_owned(),
"aaaa".to_owned(),
"ws://example.com".to_owned(),
],
],
content: "this is a test".to_owned(),
sig: "abcde".to_owned(),
tagidx: None,
};
let v = e.tag_values_by_name("e");
assert_eq!(v, vec!["foo", "bar", "baz"]);
}
#[test]
fn event_no_tag_select() {
let e = Event {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501_234,
kind: 1,
tags: vec![
vec!["j".to_owned(), "abc".to_owned()],
vec!["e".to_owned(), "foo".to_owned()],
vec!["e".to_owned(), "baz".to_owned()],
vec![
"p".to_owned(),
"aaaa".to_owned(),
"ws://example.com".to_owned(),
],
],
content: "this is a test".to_owned(),
sig: "abcde".to_owned(),
tagidx: None,
};
let v = e.tag_values_by_name("x");
// asking for tags that don't exist just returns zero-length vector
assert_eq!(v.len(), 0);
}
#[test]
fn event_canonical_with_tags() {
let e = Event {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501_234,
kind: 1,
tags: vec![
vec!["#e".to_owned(), "aoeu".to_owned()],
@@ -335,4 +603,188 @@ mod tests {
let expected = Some(expected_json.to_owned());
assert_eq!(c, expected);
}
#[test]
fn ephemeral_event() {
let mut event = Event::simple_event();
event.kind = 20000;
assert!(event.is_ephemeral());
event.kind = 29999;
assert!(event.is_ephemeral());
event.kind = 30000;
assert!(!event.is_ephemeral());
event.kind = 19999;
assert!(!event.is_ephemeral());
}
#[test]
fn replaceable_event() {
let mut event = Event::simple_event();
event.kind = 0;
assert!(event.is_replaceable());
event.kind = 3;
assert!(event.is_replaceable());
event.kind = 10000;
assert!(event.is_replaceable());
event.kind = 19999;
assert!(event.is_replaceable());
event.kind = 20000;
assert!(!event.is_replaceable());
}
#[test]
fn param_replaceable_event() {
let mut event = Event::simple_event();
event.kind = 30000;
assert!(event.is_param_replaceable());
event.kind = 39999;
assert!(event.is_param_replaceable());
event.kind = 29999;
assert!(!event.is_param_replaceable());
event.kind = 40000;
assert!(!event.is_param_replaceable());
}
#[test]
fn param_replaceable_value_case_1() {
// NIP case #1: "tags":[["d",""]]
let mut event = Event::simple_event();
event.kind = 30000;
event.tags = vec![vec!["d".to_owned(), "".to_owned()]];
assert_eq!(event.distinct_param(), Some("".to_string()));
}
#[test]
fn param_replaceable_value_case_2() {
// NIP case #2: "tags":[]: implicit d tag with empty value
let mut event = Event::simple_event();
event.kind = 30000;
assert_eq!(event.distinct_param(), Some("".to_string()));
}
#[test]
fn param_replaceable_value_case_3() {
// NIP case #3: "tags":[["d"]]: implicit empty value ""
let mut event = Event::simple_event();
event.kind = 30000;
event.tags = vec![vec!["d".to_owned()]];
assert_eq!(event.distinct_param(), Some("".to_string()));
}
#[test]
fn param_replaceable_value_case_4() {
// NIP case #4: "tags":[["d",""],["d","not empty"]]: only first d tag is considered
let mut event = Event::simple_event();
event.kind = 30000;
event.tags = vec![
vec!["d".to_owned(), "".to_string()],
vec!["d".to_owned(), "not empty".to_string()],
];
assert_eq!(event.distinct_param(), Some("".to_string()));
}
#[test]
fn param_replaceable_value_case_4b() {
// Variation of NIP case #4: "tags":[["d","not empty"],["d",""]]: only first d tag is considered
let mut event = Event::simple_event();
event.kind = 30000;
event.tags = vec![
vec!["d".to_owned(), "not empty".to_string()],
vec!["d".to_owned(), "".to_string()],
];
assert_eq!(event.distinct_param(), Some("not empty".to_string()));
}
#[test]
fn param_replaceable_value_case_5() {
// NIP case #5: "tags":[["d"],["d","some value"]]: only first d tag is considered
let mut event = Event::simple_event();
event.kind = 30000;
event.tags = vec![
vec!["d".to_owned()],
vec!["d".to_owned(), "second value".to_string()],
vec!["d".to_owned(), "third value".to_string()],
];
assert_eq!(event.distinct_param(), Some("".to_string()));
}
#[test]
fn param_replaceable_value_case_6() {
// NIP case #6: "tags":[["e"]]: same as no tags
let mut event = Event::simple_event();
event.kind = 30000;
event.tags = vec![vec!["e".to_owned()]];
assert_eq!(event.distinct_param(), Some("".to_string()));
}
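The cases above pin down the NIP-33 `d`-tag semantics: only the first `d` tag counts, a missing value means the empty string, and other tags are ignored. A plausible sketch of `distinct_param` consistent with these tests (hypothetical; the actual implementation may differ):
pub fn distinct_param(&self) -> Option<String> {
    if !self.is_param_replaceable() {
        return None;
    }
    let value = self
        .tags
        .iter()
        .filter(|t| t.first().map_or(false, |n| n == "d"))
        .map(|t| t.get(1).cloned().unwrap_or_default())
        .next()
        .unwrap_or_default(); // no d tag at all: implicit ""
    Some(value)
}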
#[test]
fn expiring_event_none() {
// regular events do not expire
let mut event = Event::simple_event();
event.kind = 7;
event.tags = vec![vec!["test".to_string(), "foo".to_string()]];
assert_eq!(event.expiration(), None);
}
#[test]
fn expiring_event_empty() {
// regular events do not expire
let mut event = Event::simple_event();
event.kind = 7;
event.tags = vec![vec!["expiration".to_string()]];
assert_eq!(event.expiration(), None);
}
#[test]
fn expiring_event_future() {
// a normal expiring event
let exp: u64 = 1676264138;
let mut event = Event::simple_event();
event.kind = 1;
event.tags = vec![vec!["expiration".to_string(), exp.to_string()]];
assert_eq!(event.expiration(), Some(exp));
}
#[test]
fn expiring_event_negative() {
// expiration set to a negative value (invalid)
let exp: i64 = -90;
let mut event = Event::simple_event();
event.kind = 1;
event.tags = vec![vec!["expiration".to_string(), exp.to_string()]];
assert_eq!(event.expiration(), None);
}
#[test]
fn expiring_event_zero() {
// a normal expiring event set to zero
let exp: i64 = 0;
let mut event = Event::simple_event();
event.kind = 1;
event.tags = vec![vec!["expiration".to_string(), exp.to_string()]];
assert_eq!(event.expiration(), Some(0));
}
#[test]
fn expiring_event_fraction() {
// expiration is fractional (invalid)
let exp: f64 = 23.334;
let mut event = Event::simple_event();
event.kind = 1;
event.tags = vec![vec!["expiration".to_string(), exp.to_string()]];
assert_eq!(event.expiration(), None);
}
#[test]
fn expiring_event_multiple() {
// multiple values, we just take the first
let mut event = Event::simple_event();
event.kind = 1;
event.tags = vec![
vec!["expiration".to_string(), (10).to_string()],
vec!["expiration".to_string(), (20).to_string()],
];
assert_eq!(event.expiration(), Some(10));
}
}

@@ -1,8 +1,38 @@
//! Relay metadata using NIP-11
use crate::config::Settings;
use serde::{Deserialize, Serialize};
pub const CARGO_PKG_VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
pub const UNIT: &str = "msats";
/// Limitations of the relay as specified in NIP-111
/// (This nip isn't finalized so may change)
#[derive(Debug, Serialize, Deserialize)]
#[allow(unused)]
pub struct Limitation {
#[serde(skip_serializing_if = "Option::is_none")]
payment_required: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
restricted_writes: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug)]
#[allow(unused)]
pub struct Fees {
#[serde(skip_serializing_if = "Option::is_none")]
admission: Option<Vec<Fee>>,
#[serde(skip_serializing_if = "Option::is_none")]
publication: Option<Vec<Fee>>,
}
#[derive(Serialize, Deserialize, Debug)]
#[allow(unused)]
pub struct Fee {
amount: u64,
unit: String,
}
#[derive(Debug, Serialize, Deserialize)]
#[allow(unused)]
@@ -18,25 +48,94 @@ pub struct RelayInfo {
#[serde(skip_serializing_if = "Option::is_none")]
pub contact: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub icon: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub supported_nips: Option<Vec<i64>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub software: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub limitation: Option<Limitation>,
#[serde(skip_serializing_if = "Option::is_none")]
pub payment_url: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub fees: Option<Fees>,
}
/// Convert an Info configuration into public Relay Info
impl From<Settings> for RelayInfo {
fn from(c: Settings) -> Self {
let mut supported_nips = vec![1, 2, 9, 11, 12, 15, 16, 20, 22, 33, 40];
if c.authorization.nip42_auth {
supported_nips.push(42);
supported_nips.sort();
}
let i = c.info;
let p = c.pay_to_relay;
let limitations = Limitation {
payment_required: Some(p.enabled),
restricted_writes: Some(
p.enabled
|| c.verified_users.is_enabled()
|| c.authorization.pubkey_whitelist.is_some()
|| c.grpc.restricts_write,
),
};
let (payment_url, fees) = if p.enabled {
let admission_fee = if p.admission_cost > 0 {
Some(vec![Fee {
amount: p.admission_cost * 1000,
unit: UNIT.to_string(),
}])
} else {
None
};
let post_fee = if p.cost_per_event > 0 {
Some(vec![Fee {
amount: p.cost_per_event * 1000,
unit: UNIT.to_string(),
}])
} else {
None
};
let fees = Fees {
admission: admission_fee,
publication: post_fee,
};
let payment_url = if p.enabled && i.relay_url.is_some() {
Some(format!(
"{}join",
i.relay_url.clone().unwrap().replace("ws", "http")
))
} else {
None
};
(payment_url, Some(fees))
} else {
(None, None)
};
RelayInfo {
id: i.relay_url,
name: i.name,
description: i.description,
pubkey: i.pubkey,
contact: i.contact,
supported_nips: Some(supported_nips),
software: Some("https://git.sr.ht/~gheartsfield/nostr-rs-relay".to_owned()),
version: CARGO_PKG_VERSION.map(std::borrow::ToOwned::to_owned),
limitation: Some(limitations),
payment_url,
fees,
icon: i.relay_icon,
}
}
}
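For illustration, a relay configured with pay-to-relay enabled and a 1000-sat admission cost would serialize roughly as the NIP-11 document below (URL, name, and version are placeholders; note the sats-to-msats conversion and the ws-to-http rewrite of the relay URL):
let example = serde_json::json!({
    "id": "wss://relay.example.com/",
    "name": "example relay",
    "supported_nips": [1, 2, 9, 11, 12, 15, 16, 20, 22, 33, 40],
    "software": "https://git.sr.ht/~gheartsfield/nostr-rs-relay",
    "version": "0.8.9",
    "limitation": { "payment_required": true, "restricted_writes": true },
    "payment_url": "https://relay.example.com/join",
    "fees": { "admission": [{ "amount": 1000000, "unit": "msats" }] }
});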

@@ -1,10 +1,18 @@
pub mod cli;
pub mod close;
pub mod config;
pub mod conn;
pub mod db;
pub mod delegation;
pub mod error;
pub mod event;
pub mod info;
pub mod nauthz;
pub mod nip05;
pub mod notice;
pub mod repo;
pub mod subscription;
pub mod tags;
pub mod utils;
// Public API for creating relays programmatically
pub mod payment;
pub mod server;

@@ -1,406 +1,109 @@
//! Server process
use clap::Parser;
use console_subscriber::ConsoleLayer;
use nostr_rs_relay::cli::CLIArgs;
use nostr_rs_relay::config;
use nostr_rs_relay::server::start_server;
use std::fs;
use std::path::Path;
use std::process;
use std::sync::mpsc as syncmpsc;
use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
use std::thread;
#[cfg(all(not(target_env = "msvc"), not(target_os = "openbsd")))]
use tikv_jemallocator::Jemalloc;
use tracing::info;
use tracing_appender::non_blocking::WorkerGuard;
use tracing_subscriber::EnvFilter;
#[cfg(all(not(target_env = "msvc"), not(target_os = "openbsd")))]
#[global_allocator]
static GLOBAL: Jemalloc = Jemalloc;
/// Start running a Nostr relay server.
fn main() {
let args = CLIArgs::parse();
// get config file name from args
let config_file_arg = args.config;
// Ensure the config file is readable if it was explicitly set
if let Some(config_path) = config_file_arg.as_ref() {
let path = Path::new(&config_path);
if !path.exists() {
eprintln!("Config file not found: {}", &config_path);
process::exit(1);
}
if !path.is_file() {
eprintln!("Invalid config file path: {}", &config_path);
process::exit(1);
}
if let Err(err) = fs::metadata(path) {
eprintln!("Error while accessing file metadata: {}", err);
process::exit(1);
}
if let Err(err) = fs::File::open(path) {
eprintln!("Config file is not readable: {}", err);
process::exit(1);
}
}
let mut _log_guard: Option<WorkerGuard> = None;
// configure settings from the config file (defaults to config.toml)
// replace default settings with those read from the config file
let mut settings = config::Settings::new(&config_file_arg).unwrap_or_else(|e| {
eprintln!("Error reading config file ({:?})", e);
process::exit(1);
});
// setup tracing
if settings.diagnostics.tracing {
// enable tracing with tokio-console
ConsoleLayer::builder().with_default_env().init();
} else {
// standard logging
if let Some(path) = &settings.logging.folder_path {
// write logs to a folder
let prefix = match &settings.logging.file_prefix {
Some(p) => p.as_str(),
None => "relay",
};
let file_appender = tracing_appender::rolling::daily(path, prefix);
let (non_blocking, guard) = tracing_appender::non_blocking(file_appender);
let filter = EnvFilter::from_default_env();
// assign to a variable that is not dropped till the program ends
_log_guard = Some(guard);
tracing_subscriber::fmt()
.with_env_filter(filter)
.with_writer(non_blocking)
.try_init()
.unwrap();
} else {
// write to stdout
tracing_subscriber::fmt::try_init().unwrap();
}
}
info!("Starting up from main");
// get database directory from args
let db_dir_arg = args.db;
// update with database location from args, if provided
if let Some(db_dir) = db_dir_arg {
settings.database.data_directory = db_dir;
}
// we should have a 'control plane' channel to monitor and bump
// the server. this will let us do stuff like clear the database,
// shutdown, etc.; for now all this does is initiate shutdown if
// `()` is sent. This will change in the future, this is just a
// stopgap to shutdown the relay when it is used as a library.
let (_, ctrl_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
// run this in a new thread
let handle = thread::spawn(move || {
let _svr = start_server(&settings, ctrl_rx);
});
// block on nostr thread to finish.
handle.join().unwrap();
}
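Since `main` now only loads settings and delegates to `start_server`, embedding the relay in another program follows the same pattern. A hypothetical sketch (assumes `Settings` exposes usable defaults), using the control channel described above to request shutdown:
use nostr_rs_relay::{config::Settings, server::start_server};
use std::sync::mpsc;
use std::thread;

fn run_embedded_relay() {
    let settings = Settings::default();
    let (ctrl_tx, ctrl_rx) = mpsc::channel::<()>();
    let handle = thread::spawn(move || {
        let _ = start_server(&settings, ctrl_rx);
    });
    // ... later, sending () initiates shutdown:
    ctrl_tx.send(()).ok();
    handle.join().unwrap();
}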

111
src/nauthz.rs Normal file

@@ -0,0 +1,111 @@
use crate::error::{Error, Result};
use crate::{event::Event, nip05::Nip05Name};
use nauthz_grpc::authorization_client::AuthorizationClient;
use nauthz_grpc::event::TagEntry;
use nauthz_grpc::{Decision, Event as GrpcEvent, EventReply, EventRequest};
use tracing::{info, warn};
pub mod nauthz_grpc {
tonic::include_proto!("nauthz");
}
// A decision for the DB to act upon
pub trait AuthzDecision: Send + Sync {
fn permitted(&self) -> bool;
fn message(&self) -> Option<String>;
}
impl AuthzDecision for EventReply {
fn permitted(&self) -> bool {
self.decision == Decision::Permit as i32
}
fn message(&self) -> Option<String> {
self.message.clone()
}
}
// A connection to an event admission GRPC server
pub struct EventAuthzService {
server_addr: String,
conn: Option<AuthorizationClient<tonic::transport::Channel>>,
}
// conversion of Nip05Names into GRPC type
impl std::convert::From<Nip05Name> for nauthz_grpc::event_request::Nip05Name {
fn from(value: Nip05Name) -> Self {
nauthz_grpc::event_request::Nip05Name {
local: value.local.clone(),
domain: value.domain,
}
}
}
// conversion of event tags into grpc struct
fn tags_to_protobuf(tags: &[Vec<String>]) -> Vec<TagEntry> {
tags.iter()
.map(|x| TagEntry { values: x.clone() })
.collect()
}
impl EventAuthzService {
pub async fn connect(server_addr: &str) -> EventAuthzService {
let mut eas = EventAuthzService {
server_addr: server_addr.to_string(),
conn: None,
};
eas.ready_connection().await;
eas
}
pub async fn ready_connection(&mut self) {
if self.conn.is_none() {
let client = AuthorizationClient::connect(self.server_addr.to_string()).await;
if let Err(ref msg) = client {
warn!("could not connect to nostr authz GRPC server: {:?}", msg);
} else {
info!("connected to nostr authorization GRPC server");
}
self.conn = client.ok();
}
}
pub async fn admit_event(
&mut self,
event: &Event,
ip: &str,
origin: Option<String>,
user_agent: Option<String>,
nip05: Option<Nip05Name>,
auth_pubkey: Option<Vec<u8>>,
) -> Result<Box<dyn AuthzDecision>> {
self.ready_connection().await;
let id_blob = hex::decode(&event.id)?;
let pubkey_blob = hex::decode(&event.pubkey)?;
let sig_blob = hex::decode(&event.sig)?;
if let Some(ref mut c) = self.conn {
let gevent = GrpcEvent {
id: id_blob,
pubkey: pubkey_blob,
sig: sig_blob,
created_at: event.created_at,
kind: event.kind,
content: event.content.clone(),
tags: tags_to_protobuf(&event.tags),
};
let svr_res = c
.event_admit(EventRequest {
event: Some(gevent),
ip_addr: Some(ip.to_string()),
origin,
user_agent,
auth_pubkey,
nip05: nip05.map(nauthz_grpc::event_request::Nip05Name::from),
})
.await?;
let reply = svr_res.into_inner();
Ok(Box::new(reply))
} else {
Err(Error::AuthzError)
}
}
}
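A sketch of how a relay might consult this service before persisting an event; the call shapes come from the code above, while the address, IP, and surrounding control flow are hypothetical:
let mut authz = EventAuthzService::connect("http://[::1]:50051").await;
match authz.admit_event(&event, "203.0.113.5", None, None, None, None).await {
    Ok(decision) if decision.permitted() => {
        // persist and broadcast the event
    }
    Ok(decision) => {
        let _reason = decision.message().unwrap_or_default();
        // reply to the client with a denial notice
    }
    Err(_) => {
        // GRPC server unavailable; relay policy decides the fallback
    }
}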

658
src/nip05.rs Normal file

@@ -0,0 +1,658 @@
//! User verification using NIP-05 names
//!
//! NIP-05 defines a mechanism for authors to associate an internet
//! address with their public key, in metadata events. This module
//! consumes a stream of metadata events, and keeps a database table
//! updated with the current NIP-05 verification status.
use crate::config::VerifiedUsers;
use crate::error::{Error, Result};
use crate::event::Event;
use crate::repo::NostrRepo;
use hyper::body::HttpBody;
use hyper::client::connect::HttpConnector;
use hyper::Client;
use hyper_rustls::HttpsConnector;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use std::time::SystemTime;
use tokio::time::Interval;
use tracing::{debug, info, warn};
/// NIP-05 verifier state
pub struct Verifier {
/// Repository for saving/retrieving events and records
repo: Arc<dyn NostrRepo>,
/// Metadata events for us to inspect
metadata_rx: tokio::sync::broadcast::Receiver<Event>,
/// Newly validated events get written and then broadcast on this channel to subscribers
event_tx: tokio::sync::broadcast::Sender<Event>,
/// Settings
settings: crate::config::Settings,
/// HTTP client
client: hyper::Client<HttpsConnector<HttpConnector>, hyper::Body>,
/// After all accounts are updated, wait this long before checking again.
wait_after_finish: Duration,
/// Minimum amount of time between HTTP queries
http_wait_duration: Duration,
/// Interval for updating verification records
reverify_interval: Interval,
}
/// A NIP-05 identifier is a local part and domain.
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct Nip05Name {
pub local: String,
pub domain: String,
}
impl Nip05Name {
/// Does this name represent the entire domain?
#[must_use]
pub fn is_domain_only(&self) -> bool {
self.local == "_"
}
/// Determine the URL to query for verification
fn to_url(&self) -> Option<http::Uri> {
format!(
"https://{}/.well-known/nostr.json?name={}",
self.domain, self.local
)
.parse::<http::Uri>()
.ok()
}
}
// Parsing Nip05Names from strings
impl std::convert::TryFrom<&str> for Nip05Name {
type Error = Error;
fn try_from(inet: &str) -> Result<Self, Self::Error> {
// break full name at the @ boundary.
let components: Vec<&str> = inet.split('@').collect();
if components.len() == 2 {
// check if local name is valid
let local = components[0];
let domain = components[1];
if local
.chars()
.all(|x| x.is_alphanumeric() || x == '_' || x == '-' || x == '.')
{
if domain
.chars()
.all(|x| x.is_alphanumeric() || x == '-' || x == '.')
{
Ok(Nip05Name {
local: local.to_owned(),
domain: domain.to_owned(),
})
} else {
Err(Error::CustomError(
"invalid character in domain part".to_owned(),
))
}
} else {
Err(Error::CustomError(
"invalid character in local part".to_owned(),
))
}
} else {
Err(Error::CustomError("too many/few components".to_owned()))
}
}
}
impl std::fmt::Display for Nip05Name {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}@{}", self.local, self.domain)
}
}
/// Check if the specified username and address are present and match in this response body
fn body_contains_user(username: &str, address: &str, bytes: &hyper::body::Bytes) -> Result<bool> {
// convert the body into json
let body: serde_json::Value = serde_json::from_slice(bytes)?;
// ensure we have a names object.
let names_map = body
.as_object()
.and_then(|x| x.get("names"))
.and_then(serde_json::Value::as_object)
.ok_or_else(|| Error::CustomError("not a map".to_owned()))?;
// get the pubkey for the requested user
let check_name = names_map.get(username).and_then(serde_json::Value::as_str);
// ensure the address is a match
Ok(check_name == Some(address))
}
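For reference, the `.well-known/nostr.json` document this parses maps local names to hex pubkeys (per NIP-05). A hypothetical check with a placeholder key:
let pk = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef";
let body = hyper::body::Bytes::from(format!(r#"{{"names":{{"bob":"{pk}"}}}}"#));
assert!(body_contains_user("bob", pk, &body).unwrap());
assert!(!body_contains_user("alice", pk, &body).unwrap());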
impl Verifier {
pub fn new(
repo: Arc<dyn NostrRepo>,
metadata_rx: tokio::sync::broadcast::Receiver<Event>,
event_tx: tokio::sync::broadcast::Sender<Event>,
settings: crate::config::Settings,
) -> Result<Self> {
info!("creating NIP-05 verifier");
// setup hyper client
let https = hyper_rustls::HttpsConnectorBuilder::new()
.with_native_roots()
.https_or_http()
.enable_http1()
.build();
let client = Client::builder().build::<_, hyper::Body>(https);
// After all accounts have been re-verified, don't check again
// for this long.
let wait_after_finish = Duration::from_secs(60 * 10);
// when we have an active queue of accounts to validate, we
// will wait this duration between HTTP requests.
let http_wait_duration = Duration::from_secs(1);
// setup initial interval for re-verification. If we find
// there is no work to be done, it will be reset to a longer
// duration.
let reverify_interval = tokio::time::interval(http_wait_duration);
Ok(Verifier {
repo,
metadata_rx,
event_tx,
settings,
client,
wait_after_finish,
http_wait_duration,
reverify_interval,
})
}
/// Perform web verification against a NIP-05 name and address.
pub async fn get_web_verification(
&mut self,
nip: &Nip05Name,
pubkey: &str,
) -> UserWebVerificationStatus {
self.get_web_verification_res(nip, pubkey)
.await
.unwrap_or(UserWebVerificationStatus::Unknown)
}
/// Perform web verification against an `Event` (must be metadata).
pub async fn get_web_verification_from_event(
&mut self,
e: &Event,
) -> UserWebVerificationStatus {
let nip_parse = e.get_nip05_addr();
if let Some(nip) = nip_parse {
self.get_web_verification_res(&nip, &e.pubkey)
.await
.unwrap_or(UserWebVerificationStatus::Unknown)
} else {
UserWebVerificationStatus::Unknown
}
}
/// Perform web verification, with a `Result` return.
async fn get_web_verification_res(
&mut self,
nip: &Nip05Name,
pubkey: &str,
) -> Result<UserWebVerificationStatus> {
// determine if this domain should be checked
if !is_domain_allowed(
&nip.domain,
&self.settings.verified_users.domain_whitelist,
&self.settings.verified_users.domain_blacklist,
) {
return Ok(UserWebVerificationStatus::DomainNotAllowed);
}
let url = nip
.to_url()
.ok_or_else(|| Error::CustomError("invalid NIP-05 URL".to_owned()))?;
let req = hyper::Request::builder()
.method(hyper::Method::GET)
.uri(url.clone())
.header("Accept", "application/json")
.header(
"User-Agent",
format!(
"nostr-rs-relay/{} NIP-05 Verifier",
crate::info::CARGO_PKG_VERSION.unwrap()
),
)
.body(hyper::Body::empty())
.expect("request builder");
let response_fut = self.client.request(req);
if let Ok(response_res) = tokio::time::timeout(Duration::from_secs(5), response_fut).await {
// limit size of verification document to 1MB.
const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024 * 1024;
let response = response_res?;
let status = response.status();
// Log non-2XX status codes
if !status.is_success() {
info!(
"unexpected status code {} received for account {:?} at URL: {}",
status,
nip.to_string(),
url
);
return Ok(UserWebVerificationStatus::Unknown);
}
// determine content length from response
let response_content_length = match response.body().size_hint().upper() {
Some(v) => v,
None => {
info!(
"missing content length header for account {:?} at URL: {}",
nip.to_string(),
url
);
return Ok(UserWebVerificationStatus::Unknown);
}
};
if response_content_length > MAX_ALLOWED_RESPONSE_SIZE {
info!(
"content length {} exceeded limit of {} bytes for account {:?} at URL: {}",
response_content_length,
MAX_ALLOWED_RESPONSE_SIZE,
nip.to_string(),
url
);
return Ok(UserWebVerificationStatus::Unknown);
}
let (parts, body) = response.into_parts();
// TODO: consider redirects
if parts.status == http::StatusCode::OK {
// parse body, determine if the username / key / address is present
let body_bytes = match hyper::body::to_bytes(body).await {
Ok(bytes) => bytes,
Err(e) => {
info!(
"failed to read response body for account {:?} at URL: {}: {:?}",
nip.to_string(),
url,
e
);
return Ok(UserWebVerificationStatus::Unknown);
}
};
match body_contains_user(&nip.local, pubkey, &body_bytes) {
Ok(true) => Ok(UserWebVerificationStatus::Verified),
Ok(false) => Ok(UserWebVerificationStatus::Unverified),
Err(e) => {
info!(
"error parsing response body for account {:?}: {:?}",
nip.to_string(),
e
);
Ok(UserWebVerificationStatus::Unknown)
}
}
} else {
info!(
"unexpected status code {} for account {:?}",
parts.status,
nip.to_string()
);
Ok(UserWebVerificationStatus::Unknown)
}
} else {
info!("timeout verifying account {:?}", nip);
Ok(UserWebVerificationStatus::Unknown)
}
}
/// Perform NIP-05 verifier tasks.
pub async fn run(&mut self) {
// use this to schedule periodic re-validation tasks
// run a loop, restarting on failure
loop {
let res = self.run_internal().await;
match res {
Err(Error::ChannelClosed) => {
// channel was closed, we are shutting down
return;
}
Err(e) => {
info!("error in verifier: {:?}", e);
}
_ => {}
}
}
}
/// Internal select loop for performing verification
async fn run_internal(&mut self) -> Result<()> {
tokio::select! {
m = self.metadata_rx.recv() => {
match m {
Ok(e) => {
if let Some(naddr) = e.get_nip05_addr() {
info!("got metadata event for ({:?},{:?})", naddr.to_string() ,e.get_author_prefix());
// Process a new author, checking if they are verified:
let check_verified = self.repo.get_latest_user_verification(&e.pubkey).await;
// ensure the event we got is more recent than the one we have, otherwise we can ignore it.
if let Ok(last_check) = check_verified {
if e.created_at <= last_check.event_created {
// this metadata is from the same author as an existing verification.
// it is older than what we have, so we can ignore it.
debug!("received older metadata event for author {:?}", e.get_author_prefix());
return Ok(());
}
}
// old, or no existing record for this user. In either case, we just create a new one.
let start = Instant::now();
let v = self.get_web_verification_from_event(&e).await;
info!(
"checked name {:?}, result: {:?}, in: {:?}",
naddr.to_string(),
v,
start.elapsed()
);
// sleep to limit how frequently we make HTTP requests for new metadata events. This should limit us to 4 req/sec.
tokio::time::sleep(Duration::from_millis(250)).await;
// if this user was verified, we need to write the
// record, persist the event, and broadcast.
if let UserWebVerificationStatus::Verified = v {
self.create_new_verified_user(&naddr.to_string(), &e).await?;
}
}
},
Err(tokio::sync::broadcast::error::RecvError::Lagged(c)) => {
warn!("incoming metadata events overwhelmed buffer, {} events dropped",c);
}
Err(tokio::sync::broadcast::error::RecvError::Closed) => {
info!("metadata broadcast channel closed");
return Err(Error::ChannelClosed);
}
}
},
_ = self.reverify_interval.tick() => {
// check and see if there is an old account that needs
// to be reverified
self.do_reverify().await?;
},
}
Ok(())
}
/// Reverify the oldest user verification record.
async fn do_reverify(&mut self) -> Result<()> {
let reverify_setting = self
.settings
.verified_users
.verify_update_frequency_duration;
let max_failures = self.settings.verified_users.max_consecutive_failures;
// get from settings, but default to 6hrs between re-checking an account
let reverify_dur = reverify_setting.unwrap_or_else(|| Duration::from_secs(60 * 60 * 6));
// find all verification records that have success or failure OLDER than the reverify_dur.
let now = SystemTime::now();
let earliest = now - reverify_dur;
let earliest_epoch = earliest
.duration_since(SystemTime::UNIX_EPOCH)
.map(|x| x.as_secs())
.unwrap_or(0);
let vr = self.repo.get_oldest_user_verification(earliest_epoch).await;
match vr {
Ok(ref v) => {
let new_status = self.get_web_verification(&v.name, &v.address).await;
match new_status {
UserWebVerificationStatus::Verified => {
// freshly verified account, update the
// timestamp.
self.repo.update_verification_timestamp(v.rowid).await?;
info!("verification updated for {}", v.to_string());
}
UserWebVerificationStatus::DomainNotAllowed
| UserWebVerificationStatus::Unknown => {
// server may be offline, or temporarily
// blocked by the config file. Note the
// failure so we can process something
// else.
// have we had enough failures to give up?
if v.failure_count >= max_failures as u64 {
info!(
"giving up on verifying {:?} after {} failures",
v.name, v.failure_count
);
self.repo.delete_verification(v.rowid).await?;
} else {
// record normal failure, incrementing failure count
info!("verification failed for {}", v.to_string());
self.repo.fail_verification(v.rowid).await?;
}
}
UserWebVerificationStatus::Unverified => {
// domain has removed the verification, drop
// the record on our side.
info!("verification rescinded for {}", v.to_string());
self.repo.delete_verification(v.rowid).await?;
}
}
}
Err(
Error::SqlError(rusqlite::Error::QueryReturnedNoRows)
| Error::SqlxError(sqlx::Error::RowNotFound),
) => {
// No users need verification. Reset the interval to
// the next verification attempt.
let start = tokio::time::Instant::now() + self.wait_after_finish;
self.reverify_interval = tokio::time::interval_at(start, self.http_wait_duration);
}
Err(ref e) => {
warn!(
"Error when checking for NIP-05 verification records: {:?}",
e
);
}
}
Ok(())
}
/// Persist an event, create a verification record, and broadcast.
// TODO: have more event-writing logic handled in the db module.
// Right now, these events avoid the rate limit. That is
// acceptable since as soon as the user is registered, this path
// is no longer used.
// TODO: refactor these into spawn_blocking
// calls to get them off the async executors.
async fn create_new_verified_user(&mut self, name: &str, event: &Event) -> Result<()> {
let start = Instant::now();
// we should only do this if we are enabled. if we are
// disabled/passive, the event has already been persisted.
let should_write_event = self.settings.verified_users.is_enabled();
if should_write_event {
match self.repo.write_event(event).await {
Ok(updated) => {
if updated != 0 {
info!(
"persisted event (new verified pubkey): {:?} in {:?}",
event.get_event_id_prefix(),
start.elapsed()
);
self.event_tx.send(event.clone()).ok();
}
}
Err(err) => {
warn!("event insert failed: {:?}", err);
if let Error::SqlError(r) = err {
warn!("because: : {:?}", r);
}
}
}
}
// write the verification record
self.repo
.create_verification_record(&event.id, name)
.await?;
Ok(())
}
}
/// Result of checking user's verification status against DNS/HTTP.
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum UserWebVerificationStatus {
Verified, // user is verified, as of now.
DomainNotAllowed, // domain blacklist or whitelist denied us from attempting a verification
Unknown, // user's status could not be determined (timeout, server error)
Unverified, // user's status is not verified (successful check, name / addr do not match)
}
/// A NIP-05 verification record.
#[derive(PartialEq, Eq, Debug, Clone)]
// Basic information for a verification event. Gives us all we need to assert a NIP-05 address is good.
pub struct VerificationRecord {
pub rowid: u64, // database row for this verification event
pub name: Nip05Name, // address being verified
pub address: String, // pubkey
pub event: String, // event ID hash providing the verification
pub event_created: u64, // when the metadata event was published
pub last_success: Option<u64>, // the most recent time a verification was provided. None if verification under this name has never succeeded.
pub last_failure: Option<u64>, // the most recent time verification was attempted, but could not be completed.
pub failure_count: u64, // how many consecutive failures have been observed.
}
/// Check with settings to determine if a given domain is allowed to
/// publish.
#[must_use]
pub fn is_domain_allowed(
domain: &str,
whitelist: &Option<Vec<String>>,
blacklist: &Option<Vec<String>>,
) -> bool {
// if there is a whitelist, domain must be present in it.
if let Some(wl) = whitelist {
// workaround for Vec contains not accepting &str
return wl.iter().any(|x| x == domain);
}
// otherwise, check that user is not in the blacklist
if let Some(bl) = blacklist {
return !bl.iter().any(|x| x == domain);
}
true
}
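The precedence here is worth spelling out: a whitelist, when present, is authoritative and the blacklist is never consulted; without a whitelist, anything not blacklisted passes. A quick behavioral sketch:
assert!(is_domain_allowed("example.com", &Some(vec!["example.com".to_owned()]), &None));
assert!(!is_domain_allowed("other.com", &Some(vec!["example.com".to_owned()]), &None));
assert!(!is_domain_allowed("bad.com", &None, &Some(vec!["bad.com".to_owned()])));
assert!(is_domain_allowed("good.com", &None, &Some(vec!["bad.com".to_owned()])));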
impl VerificationRecord {
/// Check if the record is recent enough to be considered valid,
/// and the domain is allowed.
#[must_use]
pub fn is_valid(&self, verified_users_settings: &VerifiedUsers) -> bool {
//let settings = SETTINGS.read().unwrap();
// how long a verification record is good for
let nip05_expiration = &verified_users_settings.verify_expiration_duration;
if let Some(e) = nip05_expiration {
if !self.is_current(e) {
return false;
}
}
// check domains
is_domain_allowed(
&self.name.domain,
&verified_users_settings.domain_whitelist,
&verified_users_settings.domain_blacklist,
)
}
/// Check if this record has been validated since the given
/// duration.
fn is_current(&self, d: &Duration) -> bool {
match self.last_success {
Some(s) => {
// current time - duration
let now = SystemTime::now();
let cutoff = now - *d;
let cutoff_epoch = cutoff
.duration_since(SystemTime::UNIX_EPOCH)
.map(|x| x.as_secs())
.unwrap_or(0);
s > cutoff_epoch
}
None => false,
}
}
}
impl std::fmt::Display for VerificationRecord {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"({:?},{:?})",
self.name.to_string(),
self.address.chars().take(8).collect::<String>()
)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn local_from_inet() {
let addr = "bob@example.com";
let parsed = Nip05Name::try_from(addr);
assert!(parsed.is_ok());
let v = parsed.unwrap();
assert_eq!(v.local, "bob");
assert_eq!(v.domain, "example.com");
}
#[test]
fn not_enough_sep() {
let addr = "bob_example.com";
let parsed = Nip05Name::try_from(addr);
assert!(parsed.is_err());
}
#[test]
fn too_many_sep() {
let addr = "foo@bob@example.com";
let parsed = Nip05Name::try_from(addr);
assert!(parsed.is_err());
}
#[test]
fn invalid_local_name() {
// non-permitted ascii chars
assert!(Nip05Name::try_from("foo!@example.com").is_err());
assert!(Nip05Name::try_from("foo @example.com").is_err());
assert!(Nip05Name::try_from(" foo@example.com").is_err());
assert!(Nip05Name::try_from("f oo@example.com").is_err());
assert!(Nip05Name::try_from("foo<@example.com").is_err());
// unicode dash
assert!(Nip05Name::try_from("foobar@example.com").is_err());
// emoji
assert!(Nip05Name::try_from("foo😭bar@example.com").is_err());
}
#[test]
fn invalid_domain_name() {
// non-permitted ascii chars
assert!(Nip05Name::try_from("foo@examp!e.com").is_err());
assert!(Nip05Name::try_from("foo@ example.com").is_err());
assert!(Nip05Name::try_from("foo@exa mple.com").is_err());
assert!(Nip05Name::try_from("foo@example .com").is_err());
assert!(Nip05Name::try_from("foo@exa<mple.com").is_err());
// unicode dash
assert!(Nip05Name::try_from("foobar@example.com").is_err());
// emoji
assert!(Nip05Name::try_from("foobar@ex😭ample.com").is_err());
}
#[test]
fn to_url() {
let nip = Nip05Name::try_from("foobar@example.com").unwrap();
assert_eq!(
nip.to_url(),
Some(
"https://example.com/.well-known/nostr.json?name=foobar"
.parse()
.unwrap()
)
);
}
}

101
src/notice.rs Normal file

@@ -0,0 +1,101 @@
pub enum EventResultStatus {
Saved,
Duplicate,
Invalid,
Blocked,
RateLimited,
Error,
Restricted,
}
pub struct EventResult {
pub id: String,
pub msg: String,
pub status: EventResultStatus,
}
pub enum Notice {
Message(String),
EventResult(EventResult),
AuthChallenge(String),
}
impl EventResultStatus {
#[must_use]
pub fn to_bool(&self) -> bool {
match self {
Self::Duplicate | Self::Saved => true,
Self::Invalid | Self::Blocked | Self::RateLimited | Self::Error | Self::Restricted => {
false
}
}
}
#[must_use]
pub fn prefix(&self) -> &'static str {
match self {
Self::Saved => "saved",
Self::Duplicate => "duplicate",
Self::Invalid => "invalid",
Self::Blocked => "blocked",
Self::RateLimited => "rate-limited",
Self::Error => "error",
Self::Restricted => "restricted",
}
}
}
impl Notice {
//pub fn err(err: error::Error, id: String) -> Notice {
// Notice::err_msg(format!("{}", err), id)
//}
#[must_use]
pub fn message(msg: String) -> Notice {
Notice::Message(msg)
}
fn prefixed(id: String, msg: &str, status: EventResultStatus) -> Notice {
let msg = format!("{}: {}", status.prefix(), msg);
Notice::EventResult(EventResult { id, msg, status })
}
#[must_use]
pub fn invalid(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::Invalid)
}
#[must_use]
pub fn blocked(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::Blocked)
}
#[must_use]
pub fn rate_limited(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::RateLimited)
}
#[must_use]
pub fn duplicate(id: String) -> Notice {
Notice::prefixed(id, "", EventResultStatus::Duplicate)
}
#[must_use]
pub fn error(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::Error)
}
#[must_use]
pub fn restricted(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::Restricted)
}
#[must_use]
pub fn saved(id: String) -> Notice {
Notice::EventResult(EventResult {
id,
msg: "".into(),
status: EventResultStatus::Saved,
})
}
}
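The prefix convention means every client-facing result carries a machine-readable reason. For example (hypothetical id and message text):
let n = Notice::blocked("a1b2c3".to_owned(), "pubkey is not on the whitelist");
if let Notice::EventResult(res) = n {
    assert_eq!(res.msg, "blocked: pubkey is not on the whitelist");
    assert!(!res.status.to_bool()); // Blocked maps to a false OK status
}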

137
src/payment/cln_rest.rs Normal file

@@ -0,0 +1,137 @@
use std::{fs, str::FromStr};
use async_trait::async_trait;
use cln_rpc::{
model::{
requests::InvoiceRequest,
responses::{InvoiceResponse, ListinvoicesInvoicesStatus, ListinvoicesResponse},
},
primitives::{Amount, AmountOrAny},
};
use config::ConfigError;
use http::{header::CONTENT_TYPE, HeaderValue, Uri};
use hyper::{client::HttpConnector, Client};
use hyper_rustls::HttpsConnector;
use nostr::Keys;
use rand::random;
use crate::{
config::Settings,
error::{Error, Result},
};
use super::{InvoiceInfo, InvoiceStatus, PaymentProcessor};
#[derive(Clone)]
pub struct ClnRestPaymentProcessor {
client: hyper::Client<HttpsConnector<HttpConnector>, hyper::Body>,
settings: Settings,
rune_header: HeaderValue,
}
impl ClnRestPaymentProcessor {
pub fn new(settings: &Settings) -> Result<Self> {
let rune_path = settings
.pay_to_relay
.rune_path
.clone()
.ok_or(ConfigError::NotFound("rune_path".to_string()))?;
let rune = String::from_utf8(fs::read(rune_path)?)
.map_err(|_| ConfigError::Message("Rune should be UTF8".to_string()))?;
let mut rune_header = HeaderValue::from_str(rune.trim())
.map_err(|_| ConfigError::Message("Invalid Rune header".to_string()))?;
rune_header.set_sensitive(true);
let https = hyper_rustls::HttpsConnectorBuilder::new()
.with_native_roots()
.https_only()
.enable_http1()
.build();
let client = Client::builder().build::<_, hyper::Body>(https);
Ok(Self {
client,
settings: settings.clone(),
rune_header,
})
}
}
#[async_trait]
impl PaymentProcessor for ClnRestPaymentProcessor {
async fn get_invoice(&self, key: &Keys, amount: u64) -> Result<InvoiceInfo, Error> {
let random_number: u16 = random();
let memo = format!("{}: {}", random_number, key.public_key());
let body = InvoiceRequest {
cltv: None,
deschashonly: None,
expiry: None,
preimage: None,
exposeprivatechannels: None,
fallbacks: None,
amount_msat: AmountOrAny::Amount(Amount::from_sat(amount)),
description: memo.clone(),
label: "Nostr".to_string(),
};
let uri = Uri::from_str(&format!(
"{}/v1/invoice",
&self.settings.pay_to_relay.node_url
))
.map_err(|_| ConfigError::Message("Bad node URL".to_string()))?;
let req = hyper::Request::builder()
.method(hyper::Method::POST)
.uri(uri)
.header(CONTENT_TYPE, HeaderValue::from_static("application/json"))
.header("Rune", self.rune_header.clone())
.body(hyper::Body::from(serde_json::to_string(&body)?))
.expect("request builder");
let res = self.client.request(req).await?;
let body = hyper::body::to_bytes(res.into_body()).await?;
let invoice_response: InvoiceResponse = serde_json::from_slice(&body)?;
Ok(InvoiceInfo {
pubkey: key.public_key().to_string(),
payment_hash: invoice_response.payment_hash.to_string(),
bolt11: invoice_response.bolt11,
amount,
memo,
status: InvoiceStatus::Unpaid,
confirmed_at: None,
})
}
async fn check_invoice(&self, payment_hash: &str) -> Result<InvoiceStatus, Error> {
let uri = Uri::from_str(&format!(
"{}/v1/listinvoices?payment_hash={}",
&self.settings.pay_to_relay.node_url, payment_hash
))
.map_err(|_| ConfigError::Message("Bad node URL".to_string()))?;
let req = hyper::Request::builder()
.method(hyper::Method::POST)
.uri(uri)
.header(CONTENT_TYPE, HeaderValue::from_static("application/json"))
.header("Rune", self.rune_header.clone())
.body(hyper::Body::empty())
.expect("request builder");
let res = self.client.request(req).await?;
let body = hyper::body::to_bytes(res.into_body()).await?;
let invoice_response: ListinvoicesResponse = serde_json::from_slice(&body)?;
let invoice = invoice_response
.invoices
.first()
.ok_or(Error::CustomError("Invoice not found".to_string()))?;
let status = match invoice.status {
ListinvoicesInvoicesStatus::PAID => InvoiceStatus::Paid,
ListinvoicesInvoicesStatus::UNPAID => InvoiceStatus::Unpaid,
ListinvoicesInvoicesStatus::EXPIRED => InvoiceStatus::Expired,
};
Ok(status)
}
}

src/payment/lnbits.rs Normal file

@@ -0,0 +1,176 @@
//! LNBits payment processor
use http::Uri;
use hyper::client::connect::HttpConnector;
use hyper::Client;
use hyper_rustls::HttpsConnector;
use nostr::Keys;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use async_trait::async_trait;
use rand::Rng;
use std::str::FromStr;
use url::Url;
use crate::{config::Settings, error::Error};
use super::{InvoiceInfo, InvoiceStatus, PaymentProcessor};
const APIPATH: &str = "/api/v1/payments/";
/// Info LNBits expects in create invoice request
#[derive(Serialize, Deserialize, Debug)]
pub struct LNBitsCreateInvoice {
out: bool,
amount: u64,
memo: String,
webhook: String,
unit: String,
internal: bool,
expiry: u64,
}
/// Invoice response for LN bits
#[derive(Debug, Serialize, Deserialize)]
pub struct LNBitsCreateInvoiceResponse {
payment_hash: String,
payment_request: String,
}
/// LNBits callback response.
/// Sent when an invoice is paid:
/// LNBits POSTs the status change to the relay.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LNBitsCallback {
pub checking_id: String,
pub pending: bool,
pub amount: u64,
pub memo: String,
pub time: u64,
pub bolt11: String,
pub preimage: String,
pub payment_hash: String,
pub wallet_id: String,
pub webhook: String,
pub webhook_status: Option<String>,
}
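The relay's webhook route only needs to deserialize the POSTed body into this struct. A hedged sketch (every field value below is invented for illustration):
#[test]
fn parses_lnbits_callback() {
    // Hypothetical payload; all values invented.
    let body = r#"{"checking_id":"abc","pending":false,"amount":1000,
        "memo":"42: pk","time":1700000000,"bolt11":"lnbc1invoice","preimage":"00",
        "payment_hash":"ff","wallet_id":"w1",
        "webhook":"https://relay.example/lnbits","webhook_status":null}"#;
    let cb: LNBitsCallback = serde_json::from_str(body).unwrap();
    assert!(!cb.pending);
    assert_eq!(cb.payment_hash, "ff");
}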
/// LNBits response for the check invoice endpoint
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LNBitsCheckInvoiceResponse {
paid: bool,
}
#[derive(Clone)]
pub struct LNBitsPaymentProcessor {
/// HTTP client
client: hyper::Client<HttpsConnector<HttpConnector>, hyper::Body>,
settings: Settings,
}
impl LNBitsPaymentProcessor {
pub fn new(settings: &Settings) -> Self {
// setup hyper client
let https = hyper_rustls::HttpsConnectorBuilder::new()
.with_native_roots()
.https_only()
.enable_http1()
.build();
let client = Client::builder().build::<_, hyper::Body>(https);
Self {
client,
settings: settings.clone(),
}
}
}
#[async_trait]
impl PaymentProcessor for LNBitsPaymentProcessor {
/// Calls the LNBits API to get a new invoice
async fn get_invoice(&self, key: &Keys, amount: u64) -> Result<InvoiceInfo, Error> {
let random_number: u16 = rand::thread_rng().gen();
let memo = format!("{}: {}", random_number, key.public_key());
let callback_url = Url::parse(
&self
.settings
.info
.relay_url
.clone()
.unwrap()
.replace("ws", "http"),
)?
.join("lnbits")?;
let body = LNBitsCreateInvoice {
out: false,
amount,
memo: memo.clone(),
webhook: callback_url.to_string(),
unit: "sat".to_string(),
internal: false,
expiry: 3600,
};
let url = Url::parse(&self.settings.pay_to_relay.node_url)?.join(APIPATH)?;
let uri = Uri::from_str(url.as_str().strip_suffix('/').unwrap_or(url.as_str())).unwrap();
let req = hyper::Request::builder()
.method(hyper::Method::POST)
.uri(uri)
.header("X-Api-Key", &self.settings.pay_to_relay.api_secret)
.body(hyper::Body::from(serde_json::to_string(&body)?))
.expect("request builder");
let res = self.client.request(req).await?;
// Json to Struct of LNbits callback
let body = hyper::body::to_bytes(res.into_body()).await?;
let invoice_response: LNBitsCreateInvoiceResponse = serde_json::from_slice(&body)?;
Ok(InvoiceInfo {
pubkey: key.public_key().to_string(),
payment_hash: invoice_response.payment_hash,
bolt11: invoice_response.payment_request,
amount,
memo,
status: InvoiceStatus::Unpaid,
confirmed_at: None,
})
}
/// Calls the LNBits API to check the payment status of an invoice
async fn check_invoice(&self, payment_hash: &str) -> Result<InvoiceStatus, Error> {
let url = Url::parse(&self.settings.pay_to_relay.node_url)?
.join(APIPATH)?
.join(payment_hash)?;
let uri = Uri::from_str(url.as_str()).unwrap();
let req = hyper::Request::builder()
.method(hyper::Method::GET)
.uri(uri)
.header("X-Api-Key", &self.settings.pay_to_relay.api_secret)
.body(hyper::Body::empty())
.expect("request builder");
let res = self.client.request(req).await?;
// Json to Struct of LNbits callback
let body = hyper::body::to_bytes(res.into_body()).await?;
let invoice_response: Value = serde_json::from_slice(&body)?;
let status = if let Ok(invoice_response) =
serde_json::from_value::<LNBitsCheckInvoiceResponse>(invoice_response)
{
if invoice_response.paid {
InvoiceStatus::Paid
} else {
InvoiceStatus::Unpaid
}
} else {
InvoiceStatus::Expired
};
Ok(status)
}
}

src/payment/mod.rs Normal file

@@ -0,0 +1,280 @@
use crate::error::{Error, Result};
use crate::event::Event;
use crate::payment::cln_rest::ClnRestPaymentProcessor;
use crate::payment::lnbits::LNBitsPaymentProcessor;
use crate::repo::NostrRepo;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tracing::{info, warn};
use async_trait::async_trait;
use nostr::key::{FromPkStr, FromSkStr};
use nostr::{key::Keys, Event as NostrEvent, EventBuilder};
pub mod cln_rest;
pub mod lnbits;
/// Payment handler
pub struct Payment {
/// Repository for saving/retrieving events and payment records
repo: Arc<dyn NostrRepo>,
/// Newly validated events get written and then broadcast on this channel to subscribers
event_tx: tokio::sync::broadcast::Sender<Event>,
/// Payment message sender
payment_tx: tokio::sync::broadcast::Sender<PaymentMessage>,
/// Payment message receiver
payment_rx: tokio::sync::broadcast::Receiver<PaymentMessage>,
/// Settings
settings: crate::config::Settings,
/// Nostr Keys
nostr_keys: Option<Keys>,
/// Payment Processor
processor: Arc<dyn PaymentProcessor>,
}
#[async_trait]
pub trait PaymentProcessor: Send + Sync {
/// Get invoice from processor
async fn get_invoice(&self, keys: &Keys, amount: u64) -> Result<InvoiceInfo, Error>;
/// Check payment status of an invoice
async fn check_invoice(&self, payment_hash: &str) -> Result<InvoiceStatus, Error>;
}
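A backend only has to supply these two calls. As a minimal sketch — a hypothetical mock for tests, not part of the relay — using the surrounding InvoiceInfo/InvoiceStatus types:
// Hypothetical mock backend, useful only for tests.
struct MockProcessor;

#[async_trait]
impl PaymentProcessor for MockProcessor {
    async fn get_invoice(&self, keys: &Keys, amount: u64) -> Result<InvoiceInfo, Error> {
        Ok(InvoiceInfo {
            pubkey: keys.public_key().to_string(),
            payment_hash: "00".repeat(32),   // fake 32-byte hash, hex-encoded
            bolt11: "lnbc1fake".to_string(), // placeholder invoice string
            amount,
            status: InvoiceStatus::Unpaid,
            memo: "mock".to_string(),
            confirmed_at: None,
        })
    }
    async fn check_invoice(&self, _payment_hash: &str) -> Result<InvoiceStatus, Error> {
        // The mock reports every invoice as paid.
        Ok(InvoiceStatus::Paid)
    }
}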
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub enum Processor {
LNBits,
ClnRest,
}
/// Possible states of an invoice
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, sqlx::Type)]
#[sqlx(type_name = "status")]
pub enum InvoiceStatus {
Unpaid,
Paid,
Expired,
}
impl std::fmt::Display for InvoiceStatus {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
InvoiceStatus::Paid => write!(f, "Paid"),
InvoiceStatus::Unpaid => write!(f, "Unpaid"),
InvoiceStatus::Expired => write!(f, "Expired"),
}
}
}
/// Invoice information
#[derive(Debug, Clone)]
pub struct InvoiceInfo {
pub pubkey: String,
pub payment_hash: String,
pub bolt11: String,
pub amount: u64,
pub status: InvoiceStatus,
pub memo: String,
pub confirmed_at: Option<u64>,
}
/// Message variants for the payment channel
#[derive(Debug, Clone)]
pub enum PaymentMessage {
/// New account
NewAccount(String),
/// Check account
CheckAccount(String),
/// Account Admitted
AccountAdmitted(String),
/// Invoice generated
Invoice(String, InvoiceInfo),
/// Invoice call back
/// Payment hash is passed
// This may have to be changed to better support other processors
InvoicePaid(String),
}
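Since this is a tokio broadcast channel, any component holding a cloned sender can enqueue work for the payment loop. A sketch (the capacity and pubkey literal are placeholders, not taken from the relay's server code):
// Hypothetical wiring between a connection handler and the payment loop.
async fn demo_payment_channel() {
    let (payment_tx, mut payment_rx) =
        tokio::sync::broadcast::channel::<PaymentMessage>(1024);
    // A connection handler asks for an admission check:
    payment_tx
        .send(PaymentMessage::CheckAccount("<hex pubkey>".to_string()))
        .ok();
    // The payment loop picks it up:
    if let Ok(PaymentMessage::CheckAccount(pk)) = payment_rx.recv().await {
        println!("check account for {pk}");
    }
}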
impl Payment {
pub fn new(
repo: Arc<dyn NostrRepo>,
payment_tx: tokio::sync::broadcast::Sender<PaymentMessage>,
payment_rx: tokio::sync::broadcast::Receiver<PaymentMessage>,
event_tx: tokio::sync::broadcast::Sender<Event>,
settings: crate::config::Settings,
) -> Result<Self> {
info!("Create payment handler");
// Create nostr key from sk string
let nostr_keys = if let Some(secret_key) = &settings.pay_to_relay.secret_key {
Some(Keys::from_sk_str(secret_key)?)
} else {
None
};
// Create processor kind defined in settings
let processor: Arc<dyn PaymentProcessor> = match &settings.pay_to_relay.processor {
Processor::LNBits => Arc::new(LNBitsPaymentProcessor::new(&settings)),
Processor::ClnRest => Arc::new(ClnRestPaymentProcessor::new(&settings)?),
};
Ok(Payment {
repo,
payment_tx,
payment_rx,
event_tx,
settings,
nostr_keys,
processor,
})
}
/// Perform Payment tasks
pub async fn run(&mut self) {
loop {
let res = self.run_internal().await;
if let Err(e) = res {
info!("error in payment: {:?}", e);
}
}
}
/// Internal select loop for performing payment operations
async fn run_internal(&mut self) -> Result<()> {
tokio::select! {
m = self.payment_rx.recv() => {
match m {
Ok(PaymentMessage::NewAccount(pubkey)) => {
info!("payment event for {:?}", pubkey);
// REVIEW: This will need to change for cost per event
let amount = self.settings.pay_to_relay.admission_cost;
let invoice_info = self.get_invoice_info(&pubkey, amount).await?;
// TODO: should handle this error
self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok();
},
// Gets the most recent unpaid invoice from the database,
// then checks the payment processor to verify paid/unpaid
Ok(PaymentMessage::CheckAccount(pubkey)) => {
let keys = Keys::from_pk_str(&pubkey)?;
if let Ok(Some(invoice_info)) = self.repo.get_unpaid_invoice(&keys).await {
match self.check_invoice_status(&invoice_info.payment_hash).await? {
InvoiceStatus::Paid => {
self.repo.admit_account(&keys, self.settings.pay_to_relay.admission_cost).await?;
self.payment_tx.send(PaymentMessage::AccountAdmitted(pubkey)).ok();
}
_ => {
self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok();
}
}
} else {
let amount = self.settings.pay_to_relay.admission_cost;
let invoice_info = self.get_invoice_info(&pubkey, amount).await?;
self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok();
}
}
Ok(PaymentMessage::InvoicePaid(payment_hash)) => {
if self.check_invoice_status(&payment_hash).await?.eq(&InvoiceStatus::Paid) {
let pubkey = self.repo
.update_invoice(&payment_hash, InvoiceStatus::Paid)
.await?;
let key = Keys::from_pk_str(&pubkey)?;
self.repo.admit_account(&key, self.settings.pay_to_relay.admission_cost).await?;
}
}
Ok(_) => {
// For these variants nothing needs to be done here;
// they are handled by `server`
}
Err(err) => warn!("Payment RX: {err}")
}
}
}
Ok(())
}
/// Sends Nostr DMs to the pubkey that requested the invoice:
/// two events, the terms of service followed by the bolt11 invoice
pub async fn send_admission_message(
&self,
pubkey: &str,
invoice_info: &InvoiceInfo,
) -> Result<()> {
let nostr_keys = match &self.nostr_keys {
Some(key) => key,
None => return Err(Error::CustomError("Nostr key not defined".to_string())),
};
// Create Nostr key from pk
let key = Keys::from_pk_str(pubkey)?;
let pubkey = key.public_key();
// Event DM with terms of service
let message_event: NostrEvent = EventBuilder::new_encrypted_direct_msg(
nostr_keys,
pubkey,
&self.settings.pay_to_relay.terms_message,
)?
.to_event(nostr_keys)?;
// Event DM with invoice
let invoice_event: NostrEvent =
EventBuilder::new_encrypted_direct_msg(nostr_keys, pubkey, &invoice_info.bolt11)?
.to_event(nostr_keys)?;
// Persist DM events to DB
self.repo.write_event(&message_event.clone().into()).await?;
self.repo.write_event(&invoice_event.clone().into()).await?;
// Broadcast DM events
self.event_tx.send(message_event.clone().into()).ok();
self.event_tx.send(invoice_event.clone().into()).ok();
Ok(())
}
/// Get Invoice Info
/// If the pubkey has an active invoice, that will be returned;
/// otherwise a new invoice will be generated by the payment processor
pub async fn get_invoice_info(&self, pubkey: &str, amount: u64) -> Result<InvoiceInfo> {
// If the user is already in the DB, `create_account` returns false.
// This avoids recreating admission invoices.
// DMs with the (same) invoice will likely continue to be sent
// if the client keeps trying to write to the relay.
let key = Keys::from_pk_str(pubkey)?;
if !self.repo.create_account(&key).await? {
if let Ok(Some(invoice_info)) = self.repo.get_unpaid_invoice(&key).await {
return Ok(invoice_info);
}
}
let key = Keys::from_pk_str(pubkey)?;
let invoice_info = self.processor.get_invoice(&key, amount).await?;
// Persist invoice to DB
self.repo
.create_invoice_record(&key, invoice_info.clone())
.await?;
if self.settings.pay_to_relay.direct_message {
// Admission event invoice and terms to pubkey that is joining
self.send_admission_message(pubkey, &invoice_info).await?;
}
Ok(invoice_info)
}
/// Check paid status of invoice with the payment processor
pub async fn check_invoice_status(&self, payment_hash: &str) -> Result<InvoiceStatus, Error> {
// Ask the processor for the current status (also catches invoices past expiry)
let status = self.processor.check_invoice(payment_hash).await?;
self.repo
.update_invoice(payment_hash, status.clone())
.await?;
Ok(status)
}
}
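The server is expected to own this loop by spawning it onto the runtime. A sketch, assuming a Payment already built via Payment::new (channels, repo, and settings constructed elsewhere):
fn spawn_payment_loop(mut payment: Payment) {
    tokio::spawn(async move {
        payment.run().await;
    });
}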

@@ -1,139 +0,0 @@
//! Nostr protocol layered over WebSocket
use crate::close::CloseCmd;
use crate::config;
use crate::error::{Error, Result};
use crate::event::EventCmd;
use crate::subscription::Subscription;
use core::pin::Pin;
use futures::sink::Sink;
use futures::stream::Stream;
use futures::task::Context;
use futures::task::Poll;
use hyper::upgrade::Upgraded;
use log::*;
use serde::{Deserialize, Serialize};
use tokio_tungstenite::WebSocketStream;
use tungstenite::error::Error as WsError;
use tungstenite::protocol::Message;
/// Nostr protocol messages from a client
#[derive(Deserialize, Serialize, Clone, PartialEq, Debug)]
#[serde(untagged)]
pub enum NostrMessage {
/// An `EVENT` message
EventMsg(EventCmd),
/// A `REQ` message
SubMsg(Subscription),
/// A `CLOSE` message
CloseMsg(CloseCmd),
}
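serde's `untagged` attribute tries each variant in declared order until one parses, which is how a single `serde_json::from_str` sorts `EVENT`, `REQ`, and `CLOSE` frames. A self-contained illustration with simplified stand-in types (the real `EventCmd`/`Subscription`/`CloseCmd` carry custom deserializers):
use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(untagged)]
enum Frame {
    // ["CLOSE", <subid>] -- a two-element array
    Close(String, String),
    // ["REQ", <subid>, <filter>] -- three elements, so Close fails first
    Req(String, String, serde_json::Value),
}

fn main() {
    let c: Frame = serde_json::from_str(r#"["CLOSE","sub1"]"#).unwrap();
    let r: Frame = serde_json::from_str(r#"["REQ","sub1",{"kinds":[1]}]"#).unwrap();
    println!("{c:?} / {r:?}");
}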
/// Nostr protocol messages from a relay/server
#[derive(Deserialize, Serialize, Clone, PartialEq, Debug)]
pub enum NostrResponse {
/// A `NOTICE` response
NoticeRes(String),
/// An `EVENT` response, composed of the subscription identifier,
/// and serialized event JSON
EventRes(String, String),
}
/// A Nostr protocol stream is layered on top of a Websocket stream.
pub struct NostrStream {
ws_stream: WebSocketStream<Upgraded>,
}
/// Given a websocket, return a protocol stream wrapper.
pub fn wrap_ws_in_nostr(ws: WebSocketStream<Upgraded>) -> NostrStream {
NostrStream { ws_stream: ws }
}
/// Implement the [`Stream`] interface to produce Nostr messages.
impl Stream for NostrStream {
type Item = Result<NostrMessage>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
// get the configuration
/// Convert Message to NostrMessage
fn convert(msg: String) -> Result<NostrMessage> {
let config = config::SETTINGS.read().unwrap();
let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
match parsed_res {
Ok(m) => {
if let NostrMessage::EventMsg(_) = m {
if let Some(max_size) = config.limits.max_event_bytes {
// check length, ensure that some max size is set.
if msg.len() > max_size && max_size > 0 {
return Err(Error::EventMaxLengthError(msg.len()));
}
}
}
Ok(m)
}
Err(e) => {
debug!("proto parse error: {:?}", e);
debug!("parse error on message: {}", msg.trim());
Err(Error::ProtoParseError)
}
}
}
match Pin::new(&mut self.ws_stream).poll_next(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(None) => Poll::Ready(None),
Poll::Ready(Some(v)) => match v {
Ok(Message::Text(vs)) => Poll::Ready(Some(convert(vs))),
Ok(Message::Ping(_x)) => {
debug!("client ping");
//Pin::new(&mut self.ws_stream).start_send(Message::Pong(x));
//info!("sent pong");
Poll::Pending
}
Ok(Message::Binary(_)) => Poll::Ready(Some(Err(Error::ProtoParseError))),
Ok(Message::Pong(_)) => Poll::Pending,
Ok(Message::Close(_)) => Poll::Ready(None),
Err(WsError::AlreadyClosed) | Err(WsError::ConnectionClosed) => Poll::Ready(None),
Err(_) => Poll::Ready(Some(Err(Error::ConnError))),
},
}
}
}
/// Implement the [`Sink`] interface to produce Nostr responses.
impl Sink<NostrResponse> for NostrStream {
type Error = Error;
fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
// map the error type
match Pin::new(&mut self.ws_stream).poll_ready(cx) {
Poll::Ready(Ok(())) => Poll::Ready(Ok(())),
Poll::Ready(Err(_)) => Poll::Ready(Err(Error::ConnWriteError)),
Poll::Pending => Poll::Pending,
}
}
fn start_send(mut self: Pin<&mut Self>, item: NostrResponse) -> Result<(), Self::Error> {
// TODO: do real escaping for these - at least on NOTICE,
// which surely has some problems if arbitrary text is sent.
let send_str = match item {
NostrResponse::NoticeRes(msg) => {
let s = msg.replace("\"", "");
format!("[\"NOTICE\",\"{}\"]", s)
}
NostrResponse::EventRes(sub, eventstr) => {
let subesc = sub.replace("\"", "");
format!("[\"EVENT\",\"{}\",{}]", subesc, eventstr)
}
};
match Pin::new(&mut self.ws_stream).start_send(Message::Text(send_str)) {
Ok(()) => Ok(()),
Err(_) => Err(Error::ConnWriteError),
}
}
fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
}

src/repo/mod.rs Normal file

@@ -0,0 +1,98 @@
use crate::db::QueryResult;
use crate::error::Result;
use crate::event::Event;
use crate::nip05::VerificationRecord;
use crate::payment::{InvoiceInfo, InvoiceStatus};
use crate::subscription::Subscription;
use crate::utils::unix_time;
use async_trait::async_trait;
use nostr::Keys;
use rand::Rng;
pub mod postgres;
pub mod postgres_migration;
pub mod sqlite;
pub mod sqlite_migration;
#[async_trait]
pub trait NostrRepo: Send + Sync {
/// Start the repository (any initialization or maintenance tasks can be kicked off here)
async fn start(&self) -> Result<()>;
/// Run migrations and return current version
async fn migrate_up(&self) -> Result<usize>;
/// Persist event to database
async fn write_event(&self, e: &Event) -> Result<u64>;
/// Perform a database query using a subscription.
///
/// The [`Subscription`] is converted into a SQL query. Each result
/// is published on the `query_tx` channel as it is returned. If a
/// message becomes available on the `abandon_query_rx` channel, the
/// query is immediately aborted.
async fn query_subscription(
&self,
sub: Subscription,
client_id: String,
query_tx: tokio::sync::mpsc::Sender<QueryResult>,
mut abandon_query_rx: tokio::sync::oneshot::Receiver<()>,
) -> Result<()>;
/// Perform normal maintenance
async fn optimize_db(&self) -> Result<()>;
/// Create a new verification record connected to a specific event
async fn create_verification_record(&self, event_id: &str, name: &str) -> Result<()>;
/// Update verification timestamp
async fn update_verification_timestamp(&self, id: u64) -> Result<()>;
/// Update verification record as failed
async fn fail_verification(&self, id: u64) -> Result<()>;
/// Delete verification record
async fn delete_verification(&self, id: u64) -> Result<()>;
/// Get the latest verification record for a given pubkey.
async fn get_latest_user_verification(&self, pub_key: &str) -> Result<VerificationRecord>;
/// Get oldest verification before timestamp
async fn get_oldest_user_verification(&self, before: u64) -> Result<VerificationRecord>;
/// Create a new account
async fn create_account(&self, pubkey: &Keys) -> Result<bool>;
/// Admit an account
async fn admit_account(&self, pubkey: &Keys, admission_cost: u64) -> Result<()>;
/// Gets user balance if they are an admitted pubkey
async fn get_account_balance(&self, pubkey: &Keys) -> Result<(bool, u64)>;
/// Update account balance
async fn update_account_balance(
&self,
pub_key: &Keys,
positive: bool,
new_balance: u64,
) -> Result<()>;
/// Create invoice record
async fn create_invoice_record(&self, pubkey: &Keys, invoice_info: InvoiceInfo) -> Result<()>;
/// Update Invoice for given payment hash
async fn update_invoice(&self, payment_hash: &str, status: InvoiceStatus) -> Result<String>;
/// Get the most recent invoice for a given pubkey
/// invoice must be unpaid and not expired
async fn get_unpaid_invoice(&self, pubkey: &Keys) -> Result<Option<InvoiceInfo>>;
}
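Results stream back over the mpsc sender while the oneshot receiver doubles as a cancellation signal. A hedged sketch of driving a query (channel capacity and client id are invented; assumes a `Subscription` parsed elsewhere):
async fn run_query(repo: std::sync::Arc<dyn NostrRepo>, sub: Subscription) -> Result<()> {
    let (query_tx, mut query_rx) = tokio::sync::mpsc::channel::<QueryResult>(256);
    let (abandon_tx, abandon_rx) = tokio::sync::oneshot::channel::<()>();
    let handle = tokio::spawn(async move {
        repo.query_subscription(sub, "client-1".to_string(), query_tx, abandon_rx)
            .await
    });
    while let Some(result) = query_rx.recv().await {
        // forward each QueryResult to the client's websocket here
        let _ = result;
    }
    // Sending (or dropping) `abandon_tx` before completion aborts the query.
    drop(abandon_tx);
    handle.await.ok();
    Ok(())
}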
// Current time, with a slight forward jitter in seconds
pub(crate) fn now_jitter(sec: u64) -> u64 {
// random time between now and `sec` seconds in the future
let mut rng = rand::thread_rng();
let jitter_amount = rng.gen_range(0..sec);
let now = unix_time();
now.saturating_add(jitter_amount)
}

src/repo/postgres.rs Normal file

File diff suppressed because it is too large

@@ -0,0 +1,320 @@
use crate::repo::postgres::PostgresPool;
use async_trait::async_trait;
use sqlx::{Executor, Postgres, Transaction};
#[async_trait]
pub trait Migration {
fn serial_number(&self) -> i64;
async fn run(&self, tx: &mut Transaction<Postgres>);
}
struct SimpleSqlMigration {
pub serial_number: i64,
pub sql: Vec<&'static str>,
}
#[async_trait]
impl Migration for SimpleSqlMigration {
fn serial_number(&self) -> i64 {
self.serial_number
}
async fn run(&self, tx: &mut Transaction<Postgres>) {
for sql in self.sql.iter() {
tx.execute(*sql).await.unwrap();
}
}
}
/// Execute all migrations on the database.
pub async fn run_migrations(db: &PostgresPool) -> crate::error::Result<usize> {
prepare_migrations_table(db).await;
run_migration(m001::migration(), db).await;
let m002_result = run_migration(m002::migration(), db).await;
if m002_result == MigrationResult::Upgraded {
m002::rebuild_tags(db).await?;
}
run_migration(m003::migration(), db).await;
run_migration(m004::migration(), db).await;
run_migration(m005::migration(), db).await;
Ok(current_version(db).await as usize)
}
async fn current_version(db: &PostgresPool) -> i64 {
sqlx::query_scalar("SELECT max(serial_number) FROM migrations;")
.fetch_one(db)
.await
.unwrap()
}
async fn prepare_migrations_table(db: &PostgresPool) {
sqlx::query("CREATE TABLE IF NOT EXISTS migrations (serial_number bigint)")
.execute(db)
.await
.unwrap();
}
// Running a migration was either unnecessary, or completed
#[derive(PartialEq, Eq, Debug, Clone)]
enum MigrationResult {
Upgraded,
NotNeeded,
}
async fn run_migration(migration: impl Migration, db: &PostgresPool) -> MigrationResult {
let row: i64 =
sqlx::query_scalar("SELECT COUNT(*) AS count FROM migrations WHERE serial_number = $1")
.bind(migration.serial_number())
.fetch_one(db)
.await
.unwrap();
if row > 0 {
return MigrationResult::NotNeeded;
}
let mut transaction = db.begin().await.unwrap();
migration.run(&mut transaction).await;
sqlx::query("INSERT INTO migrations VALUES ($1)")
.bind(migration.serial_number())
.execute(&mut transaction)
.await
.unwrap();
transaction.commit().await.unwrap();
MigrationResult::Upgraded
}
mod m001 {
use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
pub const VERSION: i64 = 1;
pub fn migration() -> impl Migration {
SimpleSqlMigration {
serial_number: VERSION,
sql: vec![
r#"
-- Events table
CREATE TABLE "event" (
id bytea NOT NULL,
pub_key bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
kind integer NOT NULL,
"content" bytea NOT NULL,
hidden bit(1) NOT NULL DEFAULT 0::bit(1),
delegated_by bytea NULL,
first_seen timestamp with time zone NOT NULL DEFAULT now(),
CONSTRAINT event_pkey PRIMARY KEY (id)
);
CREATE INDEX event_created_at_idx ON "event" (created_at,kind);
CREATE INDEX event_pub_key_idx ON "event" (pub_key);
CREATE INDEX event_delegated_by_idx ON "event" (delegated_by);
-- Tags table
CREATE TABLE "tag" (
id int8 NOT NULL GENERATED BY DEFAULT AS IDENTITY,
event_id bytea NOT NULL,
"name" varchar NOT NULL,
value bytea NOT NULL,
CONSTRAINT tag_fk FOREIGN KEY (event_id) REFERENCES "event"(id) ON DELETE CASCADE
);
CREATE INDEX tag_event_id_idx ON tag USING btree (event_id, name);
CREATE INDEX tag_value_idx ON tag USING btree (value);
-- NIP-05 Verification table
CREATE TABLE "user_verification" (
id int8 NOT NULL GENERATED BY DEFAULT AS IDENTITY,
event_id bytea NOT NULL,
"name" varchar NOT NULL,
verified_at timestamptz NULL,
failed_at timestamptz NULL,
fail_count int4 NULL DEFAULT 0,
CONSTRAINT user_verification_pk PRIMARY KEY (id),
CONSTRAINT user_verification_fk FOREIGN KEY (event_id) REFERENCES "event"(id) ON DELETE CASCADE
);
CREATE INDEX user_verification_event_id_idx ON user_verification USING btree (event_id);
CREATE INDEX user_verification_name_idx ON user_verification USING btree (name);
"#,
],
}
}
}
mod m002 {
use async_std::stream::StreamExt;
use indicatif::{ProgressBar, ProgressStyle};
use sqlx::Row;
use std::time::Instant;
use tracing::info;
use crate::event::{single_char_tagname, Event};
use crate::repo::postgres::PostgresPool;
use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
use crate::utils::is_lower_hex;
pub const VERSION: i64 = 2;
pub fn migration() -> impl Migration {
SimpleSqlMigration {
serial_number: VERSION,
sql: vec![
r#"
-- Add tag value column
ALTER TABLE tag ADD COLUMN value_hex bytea;
-- Remove not-null constraint
ALTER TABLE tag ALTER COLUMN value DROP NOT NULL;
-- Add value index
CREATE INDEX tag_value_hex_idx ON tag USING btree (value_hex);
"#,
],
}
}
pub async fn rebuild_tags(db: &PostgresPool) -> crate::error::Result<()> {
// Check how many events we have to process
let start = Instant::now();
let mut tx = db.begin().await.unwrap();
let mut update_tx = db.begin().await.unwrap();
// Clear out table
sqlx::query("DELETE FROM tag;")
.execute(&mut update_tx)
.await?;
{
let event_count: i64 = sqlx::query_scalar("SELECT COUNT(*) from event;")
.fetch_one(&mut tx)
.await
.unwrap();
let bar = ProgressBar::new(event_count.try_into().unwrap())
.with_message("rebuilding tags table");
bar.set_style(
ProgressStyle::with_template(
"[{elapsed_precise}] {bar:40.white/blue} {pos:>7}/{len:7} [{percent}%] {msg}",
)
.unwrap(),
);
let mut events =
sqlx::query("SELECT id, content FROM event ORDER BY id;").fetch(&mut tx);
while let Some(row) = events.next().await {
bar.inc(1);
// get the row id and content
let row = row.unwrap();
let event_id: Vec<u8> = row.get(0);
let event_bytes: Vec<u8> = row.get(1);
let event: Event = serde_json::from_str(&String::from_utf8(event_bytes).unwrap())?;
for t in event.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.first().unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
}
// safe because len was > 1
let tagval = t.get(1).unwrap();
// insert as BLOB if we can restore it losslessly.
// this means it needs to be even length and lowercase.
if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
let q = "INSERT INTO tag (event_id, \"name\", value, value_hex) VALUES ($1, $2, NULL, $3) ON CONFLICT DO NOTHING;";
sqlx::query(q)
.bind(&event_id)
.bind(tagname)
.bind(hex::decode(tagval).ok())
.execute(&mut update_tx)
.await?;
} else {
let q = "INSERT INTO tag (event_id, \"name\", value, value_hex) VALUES ($1, $2, $3, NULL) ON CONFLICT DO NOTHING;";
sqlx::query(q)
.bind(&event_id)
.bind(tagname)
.bind(tagval.as_bytes())
.execute(&mut update_tx)
.await?;
}
}
}
update_tx.commit().await?;
bar.finish();
}
info!("rebuilt tags in {:?}", start.elapsed());
Ok(())
}
}
mod m003 {
use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
pub const VERSION: i64 = 3;
pub fn migration() -> impl Migration {
SimpleSqlMigration {
serial_number: VERSION,
sql: vec![
r#"
-- Add unique constraint on tag
ALTER TABLE tag ADD CONSTRAINT unique_constraint_name UNIQUE (event_id, "name", value, value_hex);
"#,
],
}
}
}
mod m004 {
use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
pub const VERSION: i64 = 4;
pub fn migration() -> impl Migration {
SimpleSqlMigration {
serial_number: VERSION,
sql: vec![
r#"
-- Add expiration time for events
ALTER TABLE event ADD COLUMN expires_at timestamp(0) with time zone;
-- Index expiration time
CREATE INDEX event_expires_at_idx ON "event" (expires_at);
"#,
],
}
}
}
mod m005 {
use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
pub const VERSION: i64 = 5;
pub fn migration() -> impl Migration {
SimpleSqlMigration {
serial_number: VERSION,
sql: vec![
r#"
-- Create account table
CREATE TABLE "account" (
pubkey varchar NOT NULL,
is_admitted BOOLEAN NOT NULL DEFAULT FALSE,
balance BIGINT NOT NULL DEFAULT 0,
tos_accepted_at TIMESTAMP,
CONSTRAINT account_pkey PRIMARY KEY (pubkey)
);
CREATE TYPE status AS ENUM ('Paid', 'Unpaid', 'Expired');
CREATE TABLE "invoice" (
payment_hash varchar NOT NULL,
pubkey varchar NOT NULL,
invoice varchar NOT NULL,
amount BIGINT NOT NULL,
status status NOT NULL DEFAULT 'Unpaid',
description varchar,
created_at timestamp,
confirmed_at timestamp,
CONSTRAINT invoice_payment_hash PRIMARY KEY (payment_hash),
CONSTRAINT invoice_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES account (pubkey) ON DELETE CASCADE
);
"#,
],
}
}
}
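Extending the schema later means one more module with the next serial number, plus a `run_migration` call in `run_migrations`. A hypothetical `m006` (the index below is invented for illustration):
mod m006 {
    use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
    pub const VERSION: i64 = 6;
    pub fn migration() -> impl Migration {
        SimpleSqlMigration {
            serial_number: VERSION,
            sql: vec![
                r#"
                -- Hypothetical: speed up unpaid-invoice lookups
                CREATE INDEX invoice_status_idx ON invoice (status);
                "#,
            ],
        }
    }
}
// ...and in run_migrations():  run_migration(m006::migration(), db).await;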

src/repo/sqlite.rs Normal file

File diff suppressed because it is too large

@@ -0,0 +1,841 @@
//! Database schema and migrations
use crate::db::PooledConnection;
use crate::error::Result;
use crate::event::{single_char_tagname, Event};
use crate::utils::is_lower_hex;
use const_format::formatcp;
use indicatif::{ProgressBar, ProgressStyle};
use rusqlite::limits::Limit;
use rusqlite::params;
use rusqlite::Connection;
use std::cmp::Ordering;
use std::time::Instant;
use tracing::{debug, error, info};
/// Startup DB Pragmas
pub const STARTUP_SQL: &str = r##"
PRAGMA main.synchronous = NORMAL;
PRAGMA foreign_keys = ON;
PRAGMA journal_size_limit = 32768;
PRAGMA temp_store = 2; -- use memory, not temp files
PRAGMA main.cache_size = 20000; -- 80MB max cache size per conn
pragma mmap_size = 0; -- disable mmap (default)
"##;
/// Latest database version
pub const DB_VERSION: usize = 18;
/// Schema definition
const INIT_SQL: &str = formatcp!(
r##"
-- Database settings
PRAGMA encoding = "UTF-8";
PRAGMA journal_mode = WAL;
PRAGMA auto_vacuum = FULL;
PRAGMA main.synchronous=NORMAL;
PRAGMA foreign_keys = ON;
PRAGMA application_id = 1654008667;
PRAGMA user_version = {};
-- Event Table
CREATE TABLE IF NOT EXISTS event (
id INTEGER PRIMARY KEY,
event_hash BLOB NOT NULL, -- 32-byte SHA256 hash
first_seen INTEGER NOT NULL, -- when the event was first seen (not authored!) (seconds since 1970)
created_at INTEGER NOT NULL, -- when the event was authored
expires_at INTEGER, -- when the event expires and may be deleted
author BLOB NOT NULL, -- author pubkey
delegated_by BLOB, -- delegator pubkey (NIP-26)
kind INTEGER NOT NULL, -- event kind
hidden INTEGER, -- relevant for queries
content TEXT NOT NULL -- serialized json of event object
);
-- Event Indexes
CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash);
CREATE INDEX IF NOT EXISTS author_index ON event(author);
CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
CREATE INDEX IF NOT EXISTS kind_author_index ON event(kind,author);
CREATE INDEX IF NOT EXISTS kind_created_at_index ON event(kind,created_at);
CREATE INDEX IF NOT EXISTS author_created_at_index ON event(author,created_at);
CREATE INDEX IF NOT EXISTS author_kind_index ON event(author,kind);
CREATE INDEX IF NOT EXISTS event_expiration ON event(expires_at);
-- Tag Table
-- Tag values are stored as either a BLOB (if they come in as a
-- hex-string), or TEXT otherwise.
-- This means that searches need to select the appropriate column.
-- We duplicate the kind/created_at to make indexes much more efficient.
CREATE TABLE IF NOT EXISTS tag (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
name TEXT, -- the tag name ("p", "e", whatever)
value TEXT, -- the tag value, if not hex.
value_hex BLOB, -- the tag value, if it can be interpreted as a lowercase hex string.
created_at INTEGER NOT NULL, -- when the event was authored
kind INTEGER NOT NULL, -- event kind
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value);
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value);
CREATE INDEX IF NOT EXISTS tag_covering_index ON tag(name,kind,value,created_at,event_id);
-- NIP-05 User Validation
CREATE TABLE IF NOT EXISTS user_verification (
id INTEGER PRIMARY KEY,
metadata_event INTEGER NOT NULL, -- the metadata event used for this validation.
name TEXT NOT NULL, -- the nip05 field value (user@domain).
verified_at INTEGER, -- timestamp this author/nip05 was most recently verified.
failed_at INTEGER, -- timestamp a verification attempt failed (host down).
failure_count INTEGER DEFAULT 0, -- number of consecutive failures.
FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name);
CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);
-- Create account table
CREATE TABLE IF NOT EXISTS account (
pubkey TEXT PRIMARY KEY,
is_admitted INTEGER NOT NULL DEFAULT 0,
balance INTEGER NOT NULL DEFAULT 0,
tos_accepted_at INTEGER
);
-- Create account index
CREATE INDEX IF NOT EXISTS user_pubkey_index ON account(pubkey);
-- Invoice table
CREATE TABLE IF NOT EXISTS invoice (
payment_hash TEXT PRIMARY KEY,
pubkey TEXT NOT NULL,
invoice TEXT NOT NULL,
amount INTEGER NOT NULL,
status TEXT CHECK ( status IN ('Paid', 'Unpaid', 'Expired' ) ) NOT NULL DEFAULT 'Unpaid',
description TEXT,
created_at INTEGER NOT NULL,
confirmed_at INTEGER,
CONSTRAINT invoice_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES account (pubkey) ON DELETE CASCADE
);
-- Create invoice index
CREATE INDEX IF NOT EXISTS invoice_pubkey_index ON invoice(pubkey);
"##,
DB_VERSION
);
/// Determine the current application database schema version.
pub fn curr_db_version(conn: &mut Connection) -> Result<usize> {
let query = "PRAGMA user_version;";
let curr_version = conn.query_row(query, [], |row| row.get(0))?;
Ok(curr_version)
}
/// Determine event count
pub fn db_event_count(conn: &mut Connection) -> Result<usize> {
let query = "SELECT count(*) FROM event;";
let count = conn.query_row(query, [], |row| row.get(0))?;
Ok(count)
}
/// Determine tag count
pub fn db_tag_count(conn: &mut Connection) -> Result<usize> {
let query = "SELECT count(*) FROM tag;";
let count = conn.query_row(query, [], |row| row.get(0))?;
Ok(count)
}
fn mig_init(conn: &mut PooledConnection) -> usize {
match conn.execute_batch(INIT_SQL) {
Ok(()) => {
info!(
"database pragma/schema initialized to v{}, and ready",
DB_VERSION
);
}
Err(err) => {
error!("update (init) failed: {}", err);
panic!("database could not be initialized");
}
}
DB_VERSION
}
/// Upgrade DB to latest version, and execute pragma settings
pub fn upgrade_db(conn: &mut PooledConnection) -> Result<usize> {
// check the version.
let mut curr_version = curr_db_version(conn)?;
info!("DB version = {:?}", curr_version);
debug!(
"SQLite max query parameters: {}",
conn.limit(Limit::SQLITE_LIMIT_VARIABLE_NUMBER)
);
debug!(
"SQLite max table/blob/text length: {} MB",
(f64::from(conn.limit(Limit::SQLITE_LIMIT_LENGTH)) / f64::from(1024 * 1024)).floor()
);
debug!(
"SQLite max SQL length: {} MB",
(f64::from(conn.limit(Limit::SQLITE_LIMIT_SQL_LENGTH)) / f64::from(1024 * 1024)).floor()
);
match curr_version.cmp(&DB_VERSION) {
// Database is new or not current
Ordering::Less => {
// initialize from scratch
if curr_version == 0 {
curr_version = mig_init(conn);
}
// for initialized but out-of-date schemas, proceed to
// upgrade sequentially until we are current.
if curr_version == 1 {
curr_version = mig_1_to_2(conn)?;
}
if curr_version == 2 {
curr_version = mig_2_to_3(conn)?;
}
if curr_version == 3 {
curr_version = mig_3_to_4(conn)?;
}
if curr_version == 4 {
curr_version = mig_4_to_5(conn)?;
}
if curr_version == 5 {
curr_version = mig_5_to_6(conn)?;
}
if curr_version == 6 {
curr_version = mig_6_to_7(conn)?;
}
if curr_version == 7 {
curr_version = mig_7_to_8(conn)?;
}
if curr_version == 8 {
curr_version = mig_8_to_9(conn)?;
}
if curr_version == 9 {
curr_version = mig_9_to_10(conn)?;
}
if curr_version == 10 {
curr_version = mig_10_to_11(conn)?;
}
if curr_version == 11 {
curr_version = mig_11_to_12(conn)?;
}
if curr_version == 12 {
curr_version = mig_12_to_13(conn)?;
}
if curr_version == 13 {
curr_version = mig_13_to_14(conn)?;
}
if curr_version == 14 {
curr_version = mig_14_to_15(conn)?;
}
if curr_version == 15 {
curr_version = mig_15_to_16(conn)?;
}
if curr_version == 16 {
curr_version = mig_16_to_17(conn)?;
}
if curr_version == 17 {
curr_version = mig_17_to_18(conn)?;
}
if curr_version == DB_VERSION {
info!(
"All migration scripts completed successfully. Welcome to v{}.",
DB_VERSION
);
}
}
// Database is current, all is good
Ordering::Equal => {
debug!("Database version was already current (v{DB_VERSION})");
}
// Database is newer than what this code understands, abort
Ordering::Greater => {
panic!(
"Database version is newer than supported by this executable (v{curr_version} > v{DB_VERSION})",
);
}
}
// Setup PRAGMA
conn.execute_batch(STARTUP_SQL)?;
debug!("SQLite PRAGMA startup completed");
Ok(DB_VERSION)
}
pub fn rebuild_tags(conn: &mut PooledConnection) -> Result<()> {
// Check how many events we have to process
let count = db_event_count(conn)?;
let update_each_percent = 0.05;
let mut percent_done = 0.0;
let mut events_processed = 0;
let start = Instant::now();
let tx = conn.transaction()?;
{
// Clear out table
tx.execute("DELETE FROM tag;", [])?;
let mut stmt = tx.prepare("select id, content from event order by id;")?;
let mut tag_rows = stmt.query([])?;
while let Some(row) = tag_rows.next()? {
if (events_processed as f32) / (count as f32) > percent_done {
info!("Tag update {}% complete...", (100.0 * percent_done).round());
percent_done += update_each_percent;
}
// capture the row id and the serialized event JSON.
let event_id: u64 = row.get(0)?;
let event_json: String = row.get(1)?;
let event: Event = serde_json::from_str(&event_json)?;
// look at each event, and each tag, creating new tag entries if appropriate.
for t in event.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.first().unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
}
// safe because len was > 1
let tagval = t.get(1).unwrap();
// insert as BLOB if we can restore it losslessly.
// this means it needs to be even length and lowercase.
if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
tx.execute(
"INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
params![event_id, tagname, hex::decode(tagval).ok()],
)?;
} else {
// otherwise, insert as text
tx.execute(
"INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
params![event_id, tagname, &tagval],
)?;
}
}
events_processed += 1;
}
}
tx.commit()?;
info!("rebuilt tags in {:?}", start.elapsed());
Ok(())
}
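The BLOB-vs-TEXT decision hinges on whether a hex::decode plus re-encode would round-trip exactly, i.e. even length and lowercase hex. A standalone restatement of that predicate (this `is_lower_hex` is a simplified stand-in for the helper in `crate::utils`):
// Simplified stand-in for crate::utils::is_lower_hex.
fn is_lower_hex(s: &str) -> bool {
    s.chars().all(|c| matches!(c, '0'..='9' | 'a'..='f'))
}

// Mirrors the storage decision made above.
fn stores_as_blob(tagval: &str) -> bool {
    tagval.len() % 2 == 0 && is_lower_hex(tagval)
}

fn main() {
    assert!(stores_as_blob("deadbeef"));  // decodes to 4 bytes, re-encodes identically
    assert!(!stores_as_blob("DEADBEEF")); // re-encoding would lowercase it
    assert!(!stores_as_blob("abc"));      // odd length cannot decode to whole bytes
}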
// Migration Scripts
fn mig_1_to_2(conn: &mut PooledConnection) -> Result<usize> {
// only change is adding a hidden column to events.
let upgrade_sql = r##"
ALTER TABLE event ADD hidden INTEGER;
UPDATE event SET hidden=FALSE;
PRAGMA user_version = 2;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v1 -> v2");
}
Err(err) => {
error!("update (v1->v2) failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(2)
}
fn mig_2_to_3(conn: &mut PooledConnection) -> Result<usize> {
// this version lacks the tag column
info!("database schema needs update from 2->3");
let upgrade_sql = r##"
CREATE TABLE IF NOT EXISTS tag (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
name TEXT, -- the tag name ("p", "e", whatever)
value TEXT, -- the tag value, if not hex.
value_hex BLOB, -- the tag value, if it can be interpreted as a hex string.
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
PRAGMA user_version = 3;
"##;
// TODO: load existing refs into tag table
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v2 -> v3");
}
Err(err) => {
error!("update (v2->v3) failed: {}", err);
panic!("database could not be upgraded");
}
}
// iterate over every event/pubkey tag
let tx = conn.transaction()?;
{
let mut stmt = tx.prepare("select event_id, \"e\", lower(hex(referenced_event)) from event_ref union select event_id, \"p\", lower(hex(referenced_pubkey)) from pubkey_ref;")?;
let mut tag_rows = stmt.query([])?;
while let Some(row) = tag_rows.next()? {
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
let event_id: u64 = row.get(0)?;
let tag_name: String = row.get(1)?;
let tag_value: String = row.get(2)?;
// this will leave behind p/e tags that were non-hex, but they are invalid anyways.
if is_lower_hex(&tag_value) {
tx.execute(
"INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
params![event_id, tag_name, hex::decode(&tag_value).ok()],
)?;
}
}
}
info!("Updated tag values");
tx.commit()?;
Ok(3)
}
fn mig_3_to_4(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 3->4");
let upgrade_sql = r##"
-- incoming metadata events with nip05
CREATE TABLE IF NOT EXISTS user_verification (
id INTEGER PRIMARY KEY,
metadata_event INTEGER NOT NULL, -- the metadata event used for this validation.
name TEXT NOT NULL, -- the nip05 field value (user@domain).
verified_at INTEGER, -- timestamp this author/nip05 was most recently verified.
failed_at INTEGER, -- timestamp a verification attempt failed (host down).
failure_count INTEGER DEFAULT 0, -- number of consecutive failures.
FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name);
CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);
PRAGMA user_version = 4;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v3 -> v4");
}
Err(err) => {
error!("update (v3->v4) failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(4)
}
fn mig_4_to_5(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 4->5");
let upgrade_sql = r##"
DROP TABLE IF EXISTS event_ref;
DROP TABLE IF EXISTS pubkey_ref;
PRAGMA user_version=5;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v4 -> v5");
}
Err(err) => {
error!("update (v4->v5) failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(5)
}
fn mig_5_to_6(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 5->6");
// We need to rebuild the tags table. iterate through the
// event table. build event from json, insert tags into a
// fresh tag table. This was needed due to a logic error in
// how hex-like tags got indexed.
let start = Instant::now();
let tx = conn.transaction()?;
{
// Clear out table
tx.execute("DELETE FROM tag;", [])?;
let mut stmt = tx.prepare("select id, content from event order by id;")?;
let mut tag_rows = stmt.query([])?;
while let Some(row) = tag_rows.next()? {
let event_id: u64 = row.get(0)?;
let event_json: String = row.get(1)?;
let event: Event = serde_json::from_str(&event_json)?;
// look at each event, and each tag, creating new tag entries if appropriate.
for t in event.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.first().unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
}
// safe because len was > 1
let tagval = t.get(1).unwrap();
// insert as BLOB if we can restore it losslessly.
// this means it needs to be even length and lowercase.
if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
tx.execute(
"INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
params![event_id, tagname, hex::decode(tagval).ok()],
)?;
} else {
// otherwise, insert as text
tx.execute(
"INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
params![event_id, tagname, &tagval],
)?;
}
}
}
tx.execute("PRAGMA user_version = 6;", [])?;
}
tx.commit()?;
info!("database schema upgraded v5 -> v6 in {:?}", start.elapsed());
// vacuum after large table modification
let start = Instant::now();
conn.execute("VACUUM;", [])?;
info!("vacuumed DB after tags rebuild in {:?}", start.elapsed());
Ok(6)
}
fn mig_6_to_7(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 6->7");
let upgrade_sql = r##"
ALTER TABLE event ADD delegated_by BLOB;
CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
PRAGMA user_version = 7;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v6 -> v7");
}
Err(err) => {
error!("update (v6->v7) failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(7)
}
fn mig_7_to_8(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 7->8");
// Remove redundant indexes, and add a better multi-column index.
let upgrade_sql = r##"
DROP INDEX IF EXISTS created_at_index;
DROP INDEX IF EXISTS kind_index;
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
PRAGMA user_version = 8;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v7 -> v8");
}
Err(err) => {
error!("update (v7->v8) failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(8)
}
fn mig_8_to_9(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 8->9");
// Those old indexes were actually helpful...
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
PRAGMA user_version = 9;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v8 -> v9");
}
Err(err) => {
error!("update (v8->v9) failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(9)
}
fn mig_9_to_10(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 9->10");
// Those old indexes were actually helpful...
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value_hex,value);
PRAGMA user_version = 10;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v9 -> v10");
}
Err(err) => {
error!("update (v9->v10) failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(10)
}
fn mig_10_to_11(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 10->11");
// Those old indexes were actually helpful...
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value_hex);
reindex;
pragma optimize;
PRAGMA user_version = 11;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v10 -> v11");
}
Err(err) => {
error!("update (v10->v11) failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(11)
}
fn mig_11_to_12(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 11->12");
let start = Instant::now();
let tx = conn.transaction()?;
{
// Lookup every replaceable event
let mut stmt = tx.prepare("select kind,author from event where kind in (0,3,41) or (kind>=10000 and kind<20000) order by id;")?;
let mut replaceable_rows = stmt.query([])?;
info!("updating replaceable events; this could take awhile...");
while let Some(row) = replaceable_rows.next()? {
// capture the kind and author of each replaceable event.
let event_kind: u64 = row.get(0)?;
let event_author: Vec<u8> = row.get(1)?;
tx.execute(
"UPDATE event SET hidden=TRUE WHERE hidden!=TRUE and kind=? and author=? and id NOT IN (SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1)",
params![event_kind, event_author, event_kind, event_author],
)?;
}
tx.execute("PRAGMA user_version = 12;", [])?;
}
tx.commit()?;
info!(
"database schema upgraded v11 -> v12 in {:?}",
start.elapsed()
);
// vacuum after large table modification
let start = Instant::now();
conn.execute("VACUUM;", [])?;
info!(
"vacuumed DB after hidden event cleanup in {:?}",
start.elapsed()
);
Ok(12)
}
fn mig_12_to_13(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 12->13");
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS kind_author_index ON event(kind,author);
reindex;
pragma optimize;
PRAGMA user_version = 13;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v12 -> v13");
}
Err(err) => {
error!("update (v12->v13) failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(13)
}
fn mig_13_to_14(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 13->14");
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
CREATE INDEX IF NOT EXISTS kind_created_at_index ON event(kind,created_at);
pragma optimize;
PRAGMA user_version = 14;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v13 -> v14");
}
Err(err) => {
error!("update (v13->v14) failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(14)
}
fn mig_14_to_15(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 14->15");
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS author_created_at_index ON event(author,created_at);
CREATE INDEX IF NOT EXISTS author_kind_index ON event(author,kind);
PRAGMA user_version = 15;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v14 -> v15");
}
Err(err) => {
error!("update (v14->v15) failed: {}", err);
panic!("database could not be upgraded");
}
}
// clear out hidden events
let clear_hidden_sql = r##"DELETE FROM event WHERE HIDDEN=true;"##;
info!("removing hidden events; this may take awhile...");
match conn.execute_batch(clear_hidden_sql) {
Ok(()) => {
info!("all hidden events removed");
}
Err(err) => {
error!("delete failed: {}", err);
panic!("could not remove hidden events");
}
}
Ok(15)
}
fn mig_15_to_16(conn: &mut PooledConnection) -> Result<usize> {
let count = db_event_count(conn)?;
info!("database schema needs update from 15->16 (this may take a few minutes)");
let upgrade_sql = r##"
DROP TABLE tag;
CREATE TABLE tag (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
name TEXT, -- the tag name ("p", "e", whatever)
value TEXT, -- the tag value, if not hex.
created_at INTEGER NOT NULL, -- when the event was authored
kind INTEGER NOT NULL, -- event kind
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value);
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value);
CREATE INDEX IF NOT EXISTS tag_covering_index ON tag(name,kind,value,created_at,event_id);
"##;
let start = Instant::now();
let tx = conn.transaction()?;
let bar = ProgressBar::new(count.try_into().unwrap()).with_message("rebuilding tags table");
bar.set_style(
ProgressStyle::with_template(
"[{elapsed_precise}] {bar:40.white/blue} {pos:>7}/{len:7} [{percent}%] {msg}",
)
.unwrap(),
);
{
tx.execute_batch(upgrade_sql)?;
let mut stmt =
tx.prepare("select id, kind, created_at, content from event order by id;")?;
let mut tag_rows = stmt.query([])?;
let mut count = 0;
while let Some(row) = tag_rows.next()? {
count += 1;
if count % 10 == 0 {
bar.inc(10);
}
let event_id: u64 = row.get(0)?;
let kind: u64 = row.get(1)?;
let created_at: u64 = row.get(2)?;
let event_json: String = row.get(3)?;
let event: Event = serde_json::from_str(&event_json)?;
// look at each event, and each tag, creating new tag entries if appropriate.
for t in event.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.first().unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
}
// safe because len was > 1
let tagval = t.get(1).unwrap();
// otherwise, insert as text
tx.execute(
"INSERT INTO tag (event_id, name, value, kind, created_at) VALUES (?1, ?2, ?3, ?4, ?5);",
params![event_id, tagname, &tagval, kind, created_at],
)?;
}
}
tx.execute("PRAGMA user_version = 16;", [])?;
}
bar.finish();
tx.commit()?;
info!(
"database schema upgraded v15 -> v16 in {:?}",
start.elapsed()
);
Ok(16)
}
fn mig_16_to_17(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 16->17");
let upgrade_sql = r##"
ALTER TABLE event ADD COLUMN expires_at INTEGER;
CREATE INDEX IF NOT EXISTS event_expiration ON event(expires_at);
PRAGMA user_version = 17;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v16 -> v17");
}
Err(err) => {
error!("update (v16->v17) failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(17)
}
fn mig_17_to_18(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 17->18");
let upgrade_sql = r##"
-- Create invoices table
CREATE TABLE IF NOT EXISTS invoice (
payment_hash TEXT PRIMARY KEY,
pubkey TEXT NOT NULL,
invoice TEXT NOT NULL,
amount INTEGER NOT NULL,
status TEXT CHECK ( status IN ('Paid', 'Unpaid', 'Expired' ) ) NOT NULL DEFAULT 'Unpaid',
description TEXT,
created_at INTEGER NOT NULL,
confirmed_at INTEGER,
CONSTRAINT invoice_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES account (pubkey) ON DELETE CASCADE
);
-- Create invoice index
CREATE INDEX IF NOT EXISTS invoice_pubkey_index ON invoice(pubkey);
-- Create account table
CREATE TABLE IF NOT EXISTS account (
pubkey TEXT PRIMARY KEY,
is_admitted INTEGER NOT NULL DEFAULT 0,
balance INTEGER NOT NULL DEFAULT 0,
tos_accepted_at INTEGER
);
-- Create account index
CREATE INDEX IF NOT EXISTS account_pubkey_index ON account(pubkey);
pragma optimize;
PRAGMA user_version = 18;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v17 -> v18");
}
Err(err) => {
error!("update (v17->v18) failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(18)
}

src/server.rs Normal file

File diff suppressed because it is too large

src/subscription.rs

@@ -2,13 +2,14 @@
use crate::error::Result;
use crate::event::Event;
use serde::de::Unexpected;
use serde::{Deserialize, Deserializer, Serialize};
use serde::ser::SerializeMap;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_json::Value;
use std::collections::HashMap;
use std::collections::HashSet;
/// Subscription identifier and set of request filters
#[derive(Serialize, PartialEq, Debug, Clone)]
#[derive(Serialize, PartialEq, Eq, Debug, Clone)]
pub struct Subscription {
pub id: String,
pub filters: Vec<ReqFilter>,
@@ -19,7 +20,7 @@ pub struct Subscription {
/// Corresponds to client-provided subscription request elements. Any
/// element can be present if it should be used in filtering, or
/// absent ([`None`]) if it should be ignored.
#[derive(Serialize, PartialEq, Debug, Clone)]
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct ReqFilter {
/// Event hashes
pub ids: Option<Vec<String>>,
@@ -31,9 +32,50 @@ pub struct ReqFilter {
pub until: Option<u64>,
/// List of author public keys
pub authors: Option<Vec<String>>,
/// Limit number of results
pub limit: Option<u64>,
/// Set of tags
#[serde(skip)]
pub tags: Option<HashMap<String, HashSet<String>>>,
pub tags: Option<HashMap<char, HashSet<String>>>,
/// Force no matches due to malformed data
// we can't represent it in the req filter, so we don't want to
// erroneously match. This basically indicates the req tried to
// do something invalid.
pub force_no_match: bool,
}
impl Serialize for ReqFilter {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut map = serializer.serialize_map(None)?;
if let Some(ids) = &self.ids {
map.serialize_entry("ids", &ids)?;
}
if let Some(kinds) = &self.kinds {
map.serialize_entry("kinds", &kinds)?;
}
if let Some(until) = &self.until {
map.serialize_entry("until", until)?;
}
if let Some(since) = &self.since {
map.serialize_entry("since", since)?;
}
if let Some(limit) = &self.limit {
map.serialize_entry("limit", limit)?;
}
if let Some(authors) = &self.authors {
map.serialize_entry("authors", &authors)?;
}
// serialize tags
if let Some(tags) = &self.tags {
for (k, v) in tags {
let vals: Vec<&String> = v.iter().collect();
map.serialize_entry(&format!("#{k}"), &vals)?;
}
}
map.end()
}
}
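With this impl, the in-memory char-keyed tag map round-trips back to `#<char>` JSON keys. A sketch (field values invented; `kinds` is assumed to be `Option<Vec<u64>>`):
fn demo_filter_json() {
    use std::collections::{HashMap, HashSet};
    let mut tags = HashMap::new();
    tags.insert('e', HashSet::from(["abcd".to_string()]));
    let f = ReqFilter {
        ids: None,
        kinds: Some(vec![1]),
        since: None,
        until: None,
        authors: None,
        limit: Some(10),
        tags: Some(tags),
        force_no_match: false,
    };
    // prints {"kinds":[1],"limit":10,"#e":["abcd"]}
    println!("{}", serde_json::to_string(&f).unwrap());
}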
impl<'de> Deserialize<'de> for ReqFilter {
@@ -54,36 +96,63 @@ impl<'de> Deserialize<'de> for ReqFilter {
since: None,
until: None,
authors: None,
limit: None,
tags: None,
force_no_match: false,
};
let empty_string = "".into();
let mut ts = None;
// iterate through each key, and assign values that exist
for (key, val) in filter.into_iter() {
for (key, val) in filter {
// ids
if key == "ids" {
rf.ids = Deserialize::deserialize(val).ok();
let raw_ids: Option<Vec<String>> = Deserialize::deserialize(val).ok();
if let Some(a) = raw_ids.as_ref() {
if a.contains(&empty_string) {
return Err(serde::de::Error::invalid_type(
Unexpected::Other("prefix matches must not be empty strings"),
&"a json object",
));
}
}
rf.ids = raw_ids;
} else if key == "kinds" {
rf.kinds = Deserialize::deserialize(val).ok();
} else if key == "since" {
rf.since = Deserialize::deserialize(val).ok();
} else if key == "until" {
rf.until = Deserialize::deserialize(val).ok();
} else if key == "limit" {
rf.limit = Deserialize::deserialize(val).ok();
} else if key == "authors" {
rf.authors = Deserialize::deserialize(val).ok();
} else if key.starts_with('#') && key.len() > 1 && val.is_array() {
// remove the prefix
let tagname = &key[1..];
if ts.is_none() {
// Initialize the tag if necessary
ts = Some(HashMap::new());
}
if let Some(m) = ts.as_mut() {
let tag_vals: Option<Vec<String>> = Deserialize::deserialize(val).ok();
if let Some(v) = tag_vals {
let hs = HashSet::from_iter(v.into_iter());
m.insert(tagname.to_owned(), hs);
let raw_authors: Option<Vec<String>> = Deserialize::deserialize(val).ok();
if let Some(a) = raw_authors.as_ref() {
if a.contains(&empty_string) {
return Err(serde::de::Error::invalid_type(
Unexpected::Other("prefix matches must not be empty strings"),
&"a json object",
));
}
};
}
rf.authors = raw_authors;
} else if key.starts_with('#') && key.len() > 1 && val.is_array() {
if let Some(tag_search) = tag_search_char_from_filter(key) {
if ts.is_none() {
// Initialize the tag if necessary
ts = Some(HashMap::new());
}
if let Some(m) = ts.as_mut() {
let tag_vals: Option<Vec<String>> = Deserialize::deserialize(val).ok();
if let Some(v) = tag_vals {
let hs = v.into_iter().collect::<HashSet<_>>();
m.insert(tag_search.to_owned(), hs);
}
};
} else {
// tag search that is multi-character, don't add to subscription
rf.force_no_match = true;
continue;
}
}
}
rf.tags = ts;
@@ -91,6 +160,26 @@ impl<'de> Deserialize<'de> for ReqFilter {
}
}
/// Attempt to form a single-char identifier from a tag search filter
fn tag_search_char_from_filter(tagname: &str) -> Option<char> {
let tagname_nohash = &tagname[1..];
// We return the tag character if and only if the tagname consists
// of a single char.
let mut tagnamechars = tagname_nohash.chars();
let firstchar = tagnamechars.next();
match firstchar {
Some(_) => {
// check second char
if tagnamechars.next().is_none() {
firstchar
} else {
None
}
}
None => None,
}
}
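In other words, the helper yields a char only when exactly one character follows the '#'; anything longer falls through to force_no_match in the deserializer above. Illustrative behavior:

assert_eq!(tag_search_char_from_filter("#e"), Some('e'));
assert_eq!(tag_search_char_from_filter("#"), None);    // nothing after the '#'
assert_eq!(tag_search_char_from_filter("#foo"), None); // multi-char tag name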
impl<'de> Deserialize<'de> for Subscription {
/// Custom deserializer for subscriptions, which have a more
/// complex structure than the other message types.
@@ -99,7 +188,7 @@ impl<'de> Deserialize<'de> for Subscription {
D: Deserializer<'de>,
{
let mut v: Value = Deserialize::deserialize(deserializer)?;
// this shoud be a 3-or-more element array.
// this should be a 3-or-more element array.
// verify the first element is a String, REQ
// get the subscription from the second element.
// convert each of the remaining objects into filters
@@ -136,6 +225,7 @@ impl<'de> Deserialize<'de> for Subscription {
// create indexes
filters.push(f);
}
filters.dedup();
Ok(Subscription {
id: sub_id.to_owned(),
filters,
@@ -145,19 +235,52 @@ impl<'de> Deserialize<'de> for Subscription {
impl Subscription {
/// Get a copy of the subscription identifier.
#[must_use]
pub fn get_id(&self) -> String {
self.id.clone()
}
/// Determine if any filter is requesting historical (database)
/// queries. If every filter has limit:0, we do not need to query the DB.
#[must_use]
pub fn needs_historical_events(&self) -> bool {
self.filters.iter().any(|f| f.limit != Some(0))
}
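A limit of zero is how clients ask for live events only, so such subscriptions can skip the database entirely. A quick check of that behavior (illustrative, assumes serde_json):

let live_only: Subscription =
    serde_json::from_str(r#"["REQ","x",{"kinds":[1],"limit":0}]"#).unwrap();
assert!(!live_only.needs_historical_events()); // every filter has limit:0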
/// Determine if this subscription matches a given [`Event`]. Any
/// individual filter match is sufficient.
#[must_use]
pub fn interested_in_event(&self, event: &Event) -> bool {
for f in self.filters.iter() {
for f in &self.filters {
if f.interested_in_event(event) {
return true;
}
}
false
}
/// Is this subscription defined as a scraper query
pub fn is_scraper(&self) -> bool {
for f in &self.filters {
let mut precision = 0;
if f.ids.is_some() {
precision += 2;
}
if f.authors.is_some() {
precision += 1;
}
if f.kinds.is_some() {
precision += 1;
}
if f.tags.is_some() {
precision += 1;
}
if precision < 2 {
return true;
}
}
false
}
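The weights here are ids: 2 and authors/kinds/tags: 1 each; any single filter scoring below 2 marks the whole subscription as a scraper. For instance (mirroring the is_scraper tests below):

let s: Subscription = serde_json::from_str(r#"["REQ","x",{"kinds":[1]}]"#).unwrap();
assert!(s.is_scraper()); // kinds alone scores 1, under the threshold of 2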
}
fn prefix_match(prefixes: &[String], target: &str) -> bool {
@@ -174,22 +297,30 @@ impl ReqFilter {
fn ids_match(&self, event: &Event) -> bool {
self.ids
.as_ref()
.map(|vs| prefix_match(vs, &event.id))
.unwrap_or(true)
.map_or(true, |vs| prefix_match(vs, &event.id))
}
fn authors_match(&self, event: &Event) -> bool {
self.authors
.as_ref()
.map(|vs| prefix_match(vs, &event.pubkey))
.unwrap_or(true)
.map_or(true, |vs| prefix_match(vs, &event.pubkey))
}
fn delegated_authors_match(&self, event: &Event) -> bool {
if let Some(delegated_pubkey) = &event.delegated_by {
self.authors
.as_ref()
.map_or(true, |vs| prefix_match(vs, delegated_pubkey))
} else {
false
}
}
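This gives NIP-26 delegated events a second chance at matching: an authors filter naming the delegator matches even when the signing pubkey differs. A sketch (field layout as in the tests below):

let s: Subscription =
    serde_json::from_str(r#"["REQ","x",{"authors":["abc"]}]"#).unwrap();
let e = Event {
    id: "1".to_owned(),
    pubkey: "def".to_owned(),
    delegated_by: Some("abc".to_owned()),
    created_at: 0,
    kind: 1,
    tags: Vec::new(),
    content: String::new(),
    sig: String::new(),
    tagidx: None,
};
assert!(s.interested_in_event(&e)); // matches via the delegator, not the pubkey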
fn tag_match(&self, event: &Event) -> bool {
// get the hashset from the filter.
if let Some(map) = &self.tags {
for (key, val) in map.iter() {
let tag_match = event.generic_tag_val_intersect(key, val);
let tag_match = event.generic_tag_val_intersect(*key, val);
// if there is no match for this tag, the match fails.
if !tag_match {
return false;
@@ -203,20 +334,20 @@ impl ReqFilter {
/// Check if this filter either matches, or does not care about the kind.
fn kind_match(&self, kind: u64) -> bool {
self.kinds
.as_ref()
.map(|ks| ks.contains(&kind))
.unwrap_or(true)
self.kinds.as_ref().map_or(true, |ks| ks.contains(&kind))
}
/// Determine if all populated fields in this filter match the provided event.
#[must_use]
pub fn interested_in_event(&self, event: &Event) -> bool {
// self.id.as_ref().map(|v| v == &event.id).unwrap_or(true)
self.ids_match(event)
&& self.since.map(|t| event.created_at > t).unwrap_or(true)
&& self.since.map_or(true, |t| event.created_at >= t)
&& self.until.map_or(true, |t| event.created_at <= t)
&& self.kind_match(event.kind)
&& self.authors_match(event)
&& (self.authors_match(event) || self.delegated_authors_match(event))
&& self.tag_match(event)
&& !self.force_no_match
}
}
@@ -230,7 +361,7 @@ mod tests {
let s: Subscription = serde_json::from_str(raw_json)?;
assert_eq!(s.id, "some-id");
assert_eq!(s.filters.len(), 1);
assert_eq!(s.filters.get(0).unwrap().authors, None);
assert_eq!(s.filters.first().unwrap().authors, None);
Ok(())
}
@@ -246,6 +377,24 @@ mod tests {
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}
#[test]
fn req_empty_authors_prefix() {
let raw_json = "[\"REQ\",\"some-id\",{\"authors\": [\"\"]}]";
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}
#[test]
fn req_empty_ids_prefix() {
let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\"]}]";
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}
#[test]
fn req_empty_ids_prefix_mixed() {
let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\",\"aaa\"]}]";
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}
#[test]
fn legacy_filter() {
// legacy field in filter
@@ -253,13 +402,30 @@ mod tests {
assert!(serde_json::from_str::<Subscription>(raw_json).is_ok());
}
#[test]
fn dupe_filter() -> Result<()> {
let raw_json = r#"["REQ","some-id",{"kinds": [1984]}, {"kinds": [1984]}]"#;
let s: Subscription = serde_json::from_str(raw_json)?;
assert_eq!(s.filters.len(), 1);
Ok(())
}
#[test]
fn dupe_filter_many() -> Result<()> {
// duplicate filters in different order
let raw_json = r#"["REQ","some-id",{"kinds":[1984]},{"kinds":[1984]},{"kinds":[1984]},{"kinds":[1984]}]"#;
let s: Subscription = serde_json::from_str(raw_json)?;
assert_eq!(s.filters.len(), 1);
Ok(())
}
#[test]
fn author_filter() -> Result<()> {
let raw_json = r#"["REQ","some-id",{"authors": ["test-author-id"]}]"#;
let s: Subscription = serde_json::from_str(raw_json)?;
assert_eq!(s.id, "some-id");
assert_eq!(s.filters.len(), 1);
let first_filter = s.filters.get(0).unwrap();
let first_filter = s.filters.first().unwrap();
assert_eq!(
first_filter.authors,
Some(vec!("test-author-id".to_owned()))
@@ -274,6 +440,7 @@ mod tests {
let e = Event {
id: "foo".to_owned(),
pubkey: "abcd".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: Vec::new(),
@@ -292,6 +459,7 @@ mod tests {
let e = Event {
id: "abcd".to_owned(),
pubkey: "".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: Vec::new(),
@@ -310,6 +478,7 @@ mod tests {
let e = Event {
id: "abcde".to_owned(),
pubkey: "".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: Vec::new(),
@@ -321,6 +490,52 @@ mod tests {
Ok(())
}
#[test]
fn interest_until() -> Result<()> {
// subscription with a filter for ID and time
let s: Subscription =
serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "until": 1000}]"#)?;
let e = Event {
id: "abc".to_owned(),
pubkey: "".to_owned(),
delegated_by: None,
created_at: 50,
kind: 0,
tags: Vec::new(),
content: "".to_owned(),
sig: "".to_owned(),
tagidx: None,
};
assert!(s.interested_in_event(&e));
Ok(())
}
#[test]
fn interest_range() -> Result<()> {
// subscription with a filter for ID and time
let s_in: Subscription =
serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 100, "until": 200}]"#)?;
let s_before: Subscription =
serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 100, "until": 140}]"#)?;
let s_after: Subscription =
serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 160, "until": 200}]"#)?;
let e = Event {
id: "abc".to_owned(),
pubkey: "".to_owned(),
delegated_by: None,
created_at: 150,
kind: 0,
tags: Vec::new(),
content: "".to_owned(),
sig: "".to_owned(),
tagidx: None,
};
assert!(s_in.interested_in_event(&e));
assert!(!s_before.interested_in_event(&e));
assert!(!s_after.interested_in_event(&e));
Ok(())
}
#[test]
fn interest_time_and_id() -> Result<()> {
// subscription with a filter for ID and time
@@ -329,6 +544,7 @@ mod tests {
let e = Event {
id: "abc".to_owned(),
pubkey: "".to_owned(),
delegated_by: None,
created_at: 50,
kind: 0,
tags: Vec::new(),
@@ -347,6 +563,7 @@ mod tests {
let e = Event {
id: "abc".to_owned(),
pubkey: "".to_owned(),
delegated_by: None,
created_at: 1001,
kind: 0,
tags: Vec::new(),
@@ -365,6 +582,7 @@ mod tests {
let e = Event {
id: "abc".to_owned(),
pubkey: "".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: Vec::new(),
@@ -383,6 +601,7 @@ mod tests {
let e = Event {
id: "123".to_owned(),
pubkey: "abc".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: Vec::new(),
@@ -393,14 +612,15 @@ mod tests {
assert!(s.interested_in_event(&e));
Ok(())
}
#[test]
fn authors_multi_pubkey() -> Result<()> {
// check for any of a set of authors, against the pubkey
let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"authors":["abc", "bcd"]}]"#)?;
let e = Event {
id: "123".to_owned(),
pubkey: "bcd".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: Vec::new(),
@@ -419,6 +639,7 @@ mod tests {
let e = Event {
id: "123".to_owned(),
pubkey: "xyz".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: Vec::new(),
@@ -429,4 +650,49 @@ mod tests {
assert!(!s.interested_in_event(&e));
Ok(())
}
#[test]
fn serialize_filter() -> Result<()> {
let s: Subscription = serde_json::from_str(
r##"["REQ","xyz",{"authors":["abc", "bcd"], "since": 10, "until": 20, "limit":100, "#e": ["foo", "bar"], "#d": ["test"]}]"##,
)?;
let f = s.filters.first();
let serialized = serde_json::to_string(&f)?;
let serialized_wrapped = format!(r##"["REQ", "xyz",{}]"##, serialized);
let parsed: Subscription = serde_json::from_str(&serialized_wrapped)?;
let parsed_filter = parsed.filters.first();
if let Some(pf) = parsed_filter {
assert_eq!(pf.since, Some(10));
assert_eq!(pf.until, Some(20));
assert_eq!(pf.limit, Some(100));
} else {
assert!(false, "filter could not be parsed");
}
Ok(())
}
#[test]
fn is_scraper() -> Result<()> {
assert!(serde_json::from_str::<Subscription>(
r#"["REQ","some-id",{"kinds": [1984],"since": 123,"limit":1}]"#
)?
.is_scraper());
assert!(serde_json::from_str::<Subscription>(
r#"["REQ","some-id",{"kinds": [1984]},{"kinds": [1984],"authors":["aaaa"]}]"#
)?
.is_scraper());
assert!(!serde_json::from_str::<Subscription>(
r#"["REQ","some-id",{"kinds": [1984],"authors":["aaaa"]}]"#
)?
.is_scraper());
assert!(
!serde_json::from_str::<Subscription>(r#"["REQ","some-id",{"ids": ["aaaa"]}]"#)?
.is_scraper()
);
assert!(!serde_json::from_str::<Subscription>(
r##"["REQ","some-id",{"#p": ["aaaa"],"kinds":[1,4]}]"##
)?
.is_scraper());
Ok(())
}
}

src/tags.rs
@@ -1,315 +0,0 @@
//! Tags used in events to link to another event or a pubkey
//!
//! Reference specification NIP01: https://github.com/fiatjaf/nostr/blob/master/nips/01.md#events-and-signatures
//!
use bitcoin_hashes::hex::ToHex;
use bitcoin_hashes::sha256;
use secp256k1::XOnlyPublicKey;
use std::str::FromStr;
use serde::de::Unexpected;
use serde::ser::SerializeSeq;
use serde::{Deserialize, Deserializer, Serializer};
use serde_json::Value;
use crate::error::Error;
type EventId = sha256::Hash;
#[derive(Debug, PartialEq)]
struct EventTag {
event_id: EventId,
recommended_url: Option<String>,
}
#[derive(Debug, PartialEq)]
struct PubkeyTag {
pubkey: XOnlyPublicKey,
recommended_url: Option<String>,
}
// Tag structure representing two possible types of tags
#[derive(Debug, PartialEq)]
enum Tag {
Event(EventTag),
Pubkey(PubkeyTag),
}
// Custom json serialization into protocol network format
// Event tag : ["e", "<32 byte event-id>", "optional<url>"]
// Pubkey tag : ["p", "<32 byte Xonly Pubkey>", "optional<url>"]
impl serde::Serialize for Tag {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match self {
Self::Event(event_tag) => {
let mut seq = serializer.serialize_seq(None)?;
seq.serialize_element("e")?;
seq.serialize_element(&event_tag.event_id.to_hex())?;
if let Some(url) = &event_tag.recommended_url {
seq.serialize_element(url)?;
} else {
}
seq.end()
}
Self::Pubkey(pubkey_tag) => {
let mut seq = serializer.serialize_seq(None)?;
seq.serialize_element("p")?;
seq.serialize_element(&pubkey_tag.pubkey.to_hex())?;
if let Some(url) = &pubkey_tag.recommended_url {
seq.serialize_element(url)?;
} else {
}
seq.end()
}
}
}
}
// Custom json deserialization from protocol network format
impl<'de> serde::Deserialize<'de> for Tag {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
// Receive incoming data in a json object
let received: Value = Deserialize::deserialize(deserializer)?;
// Check received data is a json array
let values = received.as_array().ok_or_else(|| {
serde::de::Error::invalid_type(Unexpected::Other("tag json object"), &"json array")
})?;
// Check json array contains only string
let values = values
.iter()
.map(|value| {
value.as_str().ok_or_else(|| {
serde::de::Error::invalid_type(
Unexpected::Other("tag json data"),
&"json string",
)
})
})
.collect::<Result<Vec<_>, _>>()?;
// Check length is not more than 3
if values.len() > 3 {
Err(serde::de::Error::invalid_length(
values.len(),
&"tag length is 2 or 3",
))
} else {
// Parse the json array into appropriate types
match values[0] {
// This denotes an event type tag
"e" => {
let event_id = EventId::from_str(values[1])
.map_err(|e| serde::de::Error::custom(e.to_string()))?;
let recomended_url = if values.len() == 3 {
Some(values[2].into())
} else {
None
};
Ok(Tag::Event(EventTag {
event_id,
recommended_url: recomended_url,
}))
}
// This denotes a pubkey type tag
"p" => {
let pubkey = XOnlyPublicKey::from_str(values[1])
.map_err(|e| serde::de::Error::custom(e.to_string()))?;
let recomended_url = if values.len() == 3 {
Some(values[2].into())
} else {
None
};
Ok(Tag::Pubkey(PubkeyTag {
pubkey,
recommended_url: recomended_url,
}))
}
// Any other tag type is currently not supported
_ => Err(serde::de::Error::invalid_value(
Unexpected::Other("tag type flag"),
&"'e' or 'p'",
)),
}
}
}
}
#[allow(dead_code)]
// Some api (not public currently) to use Tags
impl Tag {
fn new_event_tag(event_id: EventId, recomended_url: Option<String>) -> Self {
Self::Event(EventTag {
event_id,
recommended_url: recomended_url,
})
}
fn new_pubkey_tag(pubkey: XOnlyPublicKey, recomended_url: Option<String>) -> Self {
Self::Pubkey(PubkeyTag {
pubkey,
recommended_url: recomended_url,
})
}
fn get_recomended_url(&self) -> Option<String> {
match self {
Self::Event(EventTag {
event_id: _,
recommended_url,
}) => recommended_url.clone(),
Self::Pubkey(PubkeyTag {
pubkey: _,
recommended_url,
}) => recommended_url.clone(),
}
}
fn get_event_id(&self) -> Result<EventId, Error> {
match self {
Self::Event(EventTag {
event_id,
recommended_url: _,
}) => Ok(*event_id),
_ => Err(Error::CustomError("Expected event tag".to_string())),
}
}
fn get_pubkey(&self) -> Result<XOnlyPublicKey, Error> {
match self {
Self::Pubkey(PubkeyTag {
pubkey,
recommended_url: _,
}) => Ok(*pubkey),
_ => Err(Error::CustomError("Expected pubkey tag".to_string())),
}
}
}
#[cfg(test)]
mod test {
use super::*;
use bitcoin_hashes::Hash;
#[test]
fn serde_roundtrip() {
let pubkey = XOnlyPublicKey::from_str(
"18845781f631c48f1c9709e23092067d06837f30aa0cd0544ac887fe91ddd166",
)
.unwrap();
let url = "wss://rsslay.fiatjaf.com";
static HASH_BYTES: [u8; 32] = [
0xef, 0x53, 0x7f, 0x25, 0xc8, 0x95, 0xbf, 0xa7, 0x82, 0x52, 0x65, 0x29, 0xa9, 0xb6,
0x3d, 0x97, 0xaa, 0x63, 0x15, 0x64, 0xd5, 0xd7, 0x89, 0xc2, 0xb7, 0x65, 0x44, 0x8c,
0x86, 0x35, 0xfb, 0x6c,
];
let event_id = EventId::from_slice(&HASH_BYTES).expect("right number of bytes");
let event_tag = Tag::new_event_tag(event_id, Some(url.to_string()));
let pubkey_tag = Tag::new_pubkey_tag(pubkey, Some(url.to_string()));
let ser_event_tag = serde_json::to_string(&event_tag).unwrap();
let ser_pubkey_tag = serde_json::to_string(&pubkey_tag).unwrap();
let deser_event_tag: Tag = serde_json::from_str(&ser_event_tag).unwrap();
let deser_pubkey_tag: Tag = serde_json::from_str(&ser_pubkey_tag).unwrap();
assert_eq!(deser_event_tag, event_tag);
assert_eq!(deser_pubkey_tag, pubkey_tag);
}
#[test]
fn invalid_pubkey() {
let test_string = r#"["p","845781f631c48f1c9709e23092067d06837f30aa0cd0544ac887fe91ddd166","wss://rsslay.fiatjaf.com"]"#;
let tag: Result<Tag, _> = serde_json::from_str(test_string);
assert_eq!(
tag.err().expect("expect error").to_string(),
"secp: malformed public key".to_string()
);
}
#[test]
fn invalid_datatype() {
let test_string = r#"["p", 188457, "wss://rsslay.fiatjaf.com"]"#;
let tag: Result<Tag, _> = serde_json::from_str(test_string);
assert_eq!(
tag.err().expect("expect error").to_string(),
"invalid type: tag json data, expected json string".to_string()
);
}
#[test]
fn invalid_tagbyte() {
let test_string = r#"["q", "18845781f631c48f1c9709e23092067d06837f30aa0cd0544ac887fe91ddd166", "wss://rsslay.fiatjaf.com"]"#;
let tag: Result<Tag, _> = serde_json::from_str(test_string);
assert_eq!(
tag.err().expect("expect error").to_string(),
"invalid value: tag type flag, expected 'e' or 'p'".to_string()
);
}
#[test]
fn invalid_event_id() {
let test_string = r#"["e","ef537f25c895bfa782526529a9b63d97aa631564d5d78c8635fb6c","wss://rsslay.fiatjaf.com"]"#;
let tag: Result<Tag, _> = serde_json::from_str(test_string);
assert_eq!(
tag.err().expect("expect error").to_string(),
"bad hex string length 54 (expected 64)".to_string()
);
}
#[test]
fn invalid_url_type() {
let test_string = r#"["e","ef537f25c895bfa782526529a9b63d97aa631564d5d789c2b765448c8635fb6c", 123456788]"#;
let tag: Result<Tag, _> = serde_json::from_str(test_string);
assert_eq!(
tag.err().expect("expect error").to_string(),
"invalid type: tag json data, expected json string".to_string()
);
}
#[test]
fn invalid_length() {
let test_string = r#"["e","ef537f25c895bfa782526529a9b63d97aa631564d5d789c2b765448c8635fb6c","wss://rsslay.fiatjaf.com", "Random extra data"]"#;
let tag: Result<Tag, _> = serde_json::from_str(test_string);
assert_eq!(
tag.err().expect("expect error").to_string(),
"invalid length 4, expected tag length is 2 or 3".to_string()
);
}
#[test]
fn invalid_json_type() {
let test_string = r#"{"type": "e","event": "ef537f25c895bfa782526529a9b63d97aa631564d5d789c2b765448c8635fb6c"}"#;
let tag: Result<Tag, _> = serde_json::from_str(test_string);
assert_eq!(
tag.err().expect("expect error").to_string(),
"invalid type: tag json object, expected json array".to_string()
);
}
#[test]
fn event_tag_missing_url() {
let test_string =
r#"["e","ef537f25c895bfa782526529a9b63d97aa631564d5d789c2b765448c8635fb6c"]"#;
let tag: Tag = serde_json::from_str(test_string).unwrap();
assert!(tag.get_recomended_url().is_none());
}
#[test]
fn pubkey_tag_missing_url() {
let test_string =
r#"["p","18845781f631c48f1c9709e23092067d06837f30aa0cd0544ac887fe91ddd166"]"#;
let tag: Tag = serde_json::from_str(test_string).unwrap();
assert!(tag.get_recomended_url().is_none());
}
}

72
src/utils.rs Normal file

@@ -0,0 +1,72 @@
//! Common utility functions
use bech32::FromBase32;
use std::time::SystemTime;
use url::Url;
/// Seconds since 1970.
#[must_use]
pub fn unix_time() -> u64 {
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.map(|x| x.as_secs())
.unwrap_or(0)
}
/// Check if a string contains only hex characters.
#[must_use]
pub fn is_hex(s: &str) -> bool {
s.chars().all(|x| char::is_ascii_hexdigit(&x))
}
/// Check if string is a nip19 string
pub fn is_nip19(s: &str) -> bool {
s.starts_with("npub") || s.starts_with("note")
}
pub fn nip19_to_hex(s: &str) -> Result<String, bech32::Error> {
let (_hrp, data, _checksum) = bech32::decode(s)?;
let data = Vec::<u8>::from_base32(&data)?;
Ok(hex::encode(data))
}
/// Check if a string contains only lower-case hex chars.
#[must_use]
pub fn is_lower_hex(s: &str) -> bool {
s.chars().all(|x| {
(char::is_ascii_lowercase(&x) || char::is_ascii_digit(&x)) && char::is_ascii_hexdigit(&x)
})
}
pub fn host_str(url: &str) -> Option<String> {
Url::parse(url)
.ok()
.and_then(|u| u.host_str().map(|s| s.to_string()))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn lower_hex() {
let hexstr = "abcd0123";
assert!(is_lower_hex(hexstr));
}
#[test]
fn nip19() {
let hexkey = "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d";
let nip19key = "npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6";
assert!(!is_nip19(hexkey));
assert!(is_nip19(nip19key));
}
#[test]
fn nip19_hex() {
let nip19key = "npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6";
let expected = "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d";
let got = nip19_to_hex(nip19key).unwrap();
assert_eq!(expected, got);
}
}
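One helper above that the tests do not cover is host_str; its behavior is just Url::parse plus host extraction (illustrative):

assert_eq!(
    host_str("wss://nostr.example.com/"),
    Some("nostr.example.com".to_string())
);
assert_eq!(host_str("not a url"), None); // unparseable input yields None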

10
tests/cli.rs Normal file

@@ -0,0 +1,10 @@
#[cfg(test)]
mod tests {
use nostr_rs_relay::cli::CLIArgs;
#[test]
fn cli_tests() {
use clap::CommandFactory;
CLIArgs::command().debug_assert();
}
}

107
tests/common/mod.rs Normal file

@@ -0,0 +1,107 @@
use anyhow::{anyhow, Result};
use nostr_rs_relay::config;
use nostr_rs_relay::server::start_server;
//use http::{Request, Response};
use hyper::{Client, StatusCode, Uri};
use std::net::TcpListener;
use std::sync::atomic::{AtomicU16, Ordering};
use std::sync::mpsc as syncmpsc;
use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
use std::thread;
use std::thread::JoinHandle;
use std::time::Duration;
use tracing::{debug, info};
pub struct Relay {
pub port: u16,
pub handle: JoinHandle<()>,
pub shutdown_tx: MpscSender<()>,
}
pub fn start_relay() -> Result<Relay> {
// setup tracing
let _trace_sub = tracing_subscriber::fmt::try_init();
info!("Starting a new relay");
// replace default settings
let mut settings = config::Settings::default();
// identify open port
info!("Checking for address...");
let port = get_available_port().unwrap();
info!("Found open port: {}", port);
// bind to local interface only
settings.network.address = "127.0.0.1".to_owned();
settings.network.port = port;
// create an in-memory DB with multiple readers
settings.database.in_memory = true;
settings.database.min_conn = 4;
settings.database.max_conn = 8;
let (shutdown_tx, shutdown_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
let handle = thread::spawn(move || {
// server will block the thread it is run on.
let _ = start_server(&settings, shutdown_rx);
});
// how do we know the relay has finished starting up?
Ok(Relay {
port,
handle,
shutdown_tx,
})
}
// check if the server is healthy via HTTP request
async fn server_ready(relay: &Relay) -> Result<bool> {
let uri: String = format!("http://127.0.0.1:{}/", relay.port);
let client = Client::new();
let uri: Uri = uri.parse().unwrap();
let res = client.get(uri).await?;
Ok(res.status() == StatusCode::OK)
}
pub async fn wait_for_healthy_relay(relay: &Relay) -> Result<()> {
// TODO: maximum time to wait for server to become healthy.
// give it a little time to start up before we start polling
tokio::time::sleep(Duration::from_millis(10)).await;
loop {
let server_check = server_ready(relay).await;
match server_check {
Ok(true) => {
// server responded with 200-OK.
break;
}
Ok(false) => {
// server responded with an error, we're done.
return Err(anyhow!("Got non-200-OK from relay"));
}
Err(_) => {
// server is not yet ready, probably connection refused...
debug!("Relay not ready, will try again...");
tokio::time::sleep(Duration::from_millis(10)).await;
}
}
}
info!("relay is ready");
Ok(())
// simple message sent to web browsers
//let mut request = Request::builder()
// .uri("https://www.rust-lang.org/")
// .header("User-Agent", "my-awesome-agent/1.0");
}
// from https://elliotekj.com/posts/2017/07/25/find-available-tcp-port-rust/
// This needed some modification; if multiple tasks all ask for open ports, they will tend to get the same one.
// Instead, we hand out starting points incrementally from a global counter.
static PORT_COUNTER: AtomicU16 = AtomicU16::new(4030);
fn get_available_port() -> Option<u16> {
let startsearch = PORT_COUNTER.fetch_add(10, Ordering::SeqCst);
if startsearch >= 20000 {
// wrap around
PORT_COUNTER.store(4030, Ordering::Relaxed);
}
(startsearch..20000).find(|port| port_is_available(*port))
}
pub fn port_is_available(port: u16) -> bool {
info!("checking on port {}", port);
TcpListener::bind(("127.0.0.1", port)).is_ok()
}
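The atomic fetch_add is what keeps concurrent test tasks from racing toward the same port: each caller receives a distinct 10-port search window. Illustrative, within this module:

let a = PORT_COUNTER.fetch_add(10, Ordering::SeqCst); // e.g. 4030
let b = PORT_COUNTER.fetch_add(10, Ordering::SeqCst); // b == a + 10, so the windows never overlap
assert_ne!(a, b);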

356
tests/conn.rs Normal file

@@ -0,0 +1,356 @@
#[cfg(test)]
mod tests {
use bitcoin_hashes::hex::ToHex;
use bitcoin_hashes::sha256;
use bitcoin_hashes::Hash;
use secp256k1::rand;
use secp256k1::{KeyPair, Secp256k1, XOnlyPublicKey};
use nostr_rs_relay::conn::ClientConn;
use nostr_rs_relay::error::Error;
use nostr_rs_relay::event::Event;
use nostr_rs_relay::utils::unix_time;
const RELAY: &str = "wss://nostr.example.com/";
#[test]
fn test_generate_auth_challenge() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let last_auth_challenge = client_conn.auth_challenge().cloned();
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_ne!(
client_conn.auth_challenge().unwrap(),
&last_auth_challenge.unwrap()
);
assert_eq!(client_conn.auth_pubkey(), None);
}
#[test]
fn test_authenticate_with_valid_event() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let challenge = client_conn.auth_challenge().unwrap();
let event = auth_event(challenge);
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Ok(())));
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), Some(&event.pubkey));
}
#[test]
fn test_fail_to_authenticate_in_invalid_state() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let event = auth_event(&"challenge".into());
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
#[test]
fn test_authenticate_when_already_authenticated() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let challenge = client_conn.auth_challenge().unwrap().clone();
let event = auth_event(&challenge);
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Ok(())));
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), Some(&event.pubkey));
let event1 = auth_event(&challenge);
let result1 = client_conn.authenticate(&event1, RELAY);
assert!(matches!(result1, Ok(())));
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), Some(&event.pubkey));
assert_ne!(client_conn.auth_pubkey(), Some(&event1.pubkey));
}
#[test]
fn test_fail_to_authenticate_with_invalid_event() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let challenge = client_conn.auth_challenge().unwrap();
let mut event = auth_event(challenge);
event.sig = event.sig.chars().rev().collect::<String>();
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
#[test]
fn test_fail_to_authenticate_with_invalid_event_kind() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let challenge = client_conn.auth_challenge().unwrap();
let event = auth_event_with_kind(challenge, 9999999999999999);
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
#[test]
fn test_fail_to_authenticate_with_expired_timestamp() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let challenge = client_conn.auth_challenge().unwrap();
let event = auth_event_with_created_at(challenge, unix_time() - 1200); // 20 minutes
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
#[test]
fn test_fail_to_authenticate_with_future_timestamp() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let challenge = client_conn.auth_challenge().unwrap();
let event = auth_event_with_created_at(challenge, unix_time() + 1200); // 20 minutes
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
#[test]
fn test_fail_to_authenticate_without_tags() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let event = auth_event_without_tags();
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
#[test]
fn test_fail_to_authenticate_without_challenge() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let event = auth_event_without_challenge();
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
#[test]
fn test_fail_to_authenticate_without_relay() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let challenge = client_conn.auth_challenge().unwrap();
let event = auth_event_without_relay(challenge);
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
#[test]
fn test_fail_to_authenticate_with_invalid_challenge() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let event = auth_event(&"invalid challenge".into());
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
#[test]
fn test_fail_to_authenticate_with_invalid_relay() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let challenge = client_conn.auth_challenge().unwrap();
let event = auth_event_with_relay(challenge, &"xyz".into());
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
fn auth_event(challenge: &String) -> Event {
create_auth_event(Some(challenge), Some(&RELAY.into()), 22242, unix_time())
}
fn auth_event_with_kind(challenge: &String, kind: u64) -> Event {
create_auth_event(Some(challenge), Some(&RELAY.into()), kind, unix_time())
}
fn auth_event_with_created_at(challenge: &String, created_at: u64) -> Event {
create_auth_event(Some(challenge), Some(&RELAY.into()), 22242, created_at)
}
fn auth_event_without_challenge() -> Event {
create_auth_event(None, Some(&RELAY.into()), 22242, unix_time())
}
fn auth_event_without_relay(challenge: &String) -> Event {
create_auth_event(Some(challenge), None, 22242, unix_time())
}
fn auth_event_without_tags() -> Event {
create_auth_event(None, None, 22242, unix_time())
}
fn auth_event_with_relay(challenge: &String, relay: &String) -> Event {
create_auth_event(Some(challenge), Some(relay), 22242, unix_time())
}
fn create_auth_event(
challenge: Option<&String>,
relay: Option<&String>,
kind: u64,
created_at: u64,
) -> Event {
let secp = Secp256k1::new();
let key_pair = KeyPair::new(&secp, &mut rand::thread_rng());
let public_key = XOnlyPublicKey::from_keypair(&key_pair);
let mut tags: Vec<Vec<String>> = vec![];
if let Some(c) = challenge {
let tag = vec!["challenge".into(), c.into()];
tags.push(tag);
}
if let Some(r) = relay {
let tag = vec!["relay".into(), r.into()];
tags.push(tag);
}
let mut event = Event {
id: "0".to_owned(),
pubkey: public_key.to_hex(),
delegated_by: None,
created_at,
kind,
tags,
content: "".to_owned(),
sig: "0".to_owned(),
tagidx: None,
};
let c = event.to_canonical().unwrap();
let digest: sha256::Hash = sha256::Hash::hash(c.as_bytes());
let msg = secp256k1::Message::from_slice(digest.as_ref()).unwrap();
let sig = secp.sign_schnorr(&msg, &key_pair);
event.id = format!("{digest:x}");
event.sig = sig.to_hex();
event
}
}

79
tests/integration_test.rs Normal file

@@ -0,0 +1,79 @@
use anyhow::Result;
use futures::SinkExt;
use futures::StreamExt;
use std::thread;
use std::time::Duration;
use tokio_tungstenite::connect_async;
use tracing::info;
mod common;
#[tokio::test]
async fn start_and_stop() -> Result<()> {
// this will be the common pattern for acquiring a new relay:
// start a fresh relay, on a port to-be-provided back to us:
let relay = common::start_relay()?;
// wait for the relay's webserver to start up and deliver a page:
common::wait_for_healthy_relay(&relay).await?;
let port = relay.port;
// just make sure we can startup and shut down.
// if we send a shutdown message before the server is listening,
// we will get a SendError. Keep sending until someone is
// listening.
loop {
let shutdown_res = relay.shutdown_tx.send(());
match shutdown_res {
Ok(()) => {
break;
}
Err(_) => {
thread::sleep(Duration::from_millis(100));
}
}
}
// wait for relay to shutdown
let thread_join = relay.handle.join();
assert!(thread_join.is_ok());
// assert that port is now available.
assert!(common::port_is_available(port));
Ok(())
}
#[tokio::test]
async fn relay_home_page() -> Result<()> {
// get a relay and wait for startup...
let relay = common::start_relay()?;
common::wait_for_healthy_relay(&relay).await?;
// tell relay to shutdown
let _res = relay.shutdown_tx.send(());
Ok(())
}
//#[tokio::test]
// Still in progress
async fn publish_test() -> Result<()> {
// get a relay and wait for startup
let relay = common::start_relay()?;
common::wait_for_healthy_relay(&relay).await?;
// open a non-secure websocket connection.
let (mut ws, _res) = connect_async(format!("ws://localhost:{}", relay.port)).await?;
// send a simple pre-made message
let simple_event = r#"["EVENT", {"content": "hello world","created_at": 1691239763,
"id":"f3ce6798d70e358213ebbeba4886bbdfacf1ecfd4f65ee5323ef5f404de32b86",
"kind": 1,
"pubkey": "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798",
"sig": "30ca29e8581eeee75bf838171dec818af5e6de2b74f5337de940f5cc91186534c0b20d6cf7ad1043a2c51dbd60b979447720a471d346322103c83f6cb66e4e98",
"tags": []}]"#;
ws.send(simple_event.into()).await?;
// get response from server, confirm it is an array with first element "OK"
let event_confirm = ws.next().await;
ws.close(None).await?;
info!("event confirmed: {:?}", event_confirm);
// open a new connection, and wait for some time to get the event.
let (mut sub_ws, _res) = connect_async(format!("ws://localhost:{}", relay.port)).await?;
let event_sub = r#"["REQ", "simple", {}]"#;
sub_ws.send(event_sub.into()).await?;
// read from subscription
let _ws_next = sub_ws.next().await;
let _res = relay.shutdown_tx.send(());
Ok(())
}