Compare commits

...

109 Commits

Author SHA1 Message Date
Laszlo Megyer
b04ab76e73 fix: postgresql tag filtering for odd-length hex-looking values
The tag filtering code misses odd-length strings that contain only hex digits [0-9a-f].
This fix makes the condition for `has_plain_values` the inverse of the condition for `has_hex_values`.

Fixes #191
2024-04-03 02:51:12 +00:00
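For illustration, a minimal sketch of the condition this fix describes (hypothetical names, not the relay's actual code): a tag value only counts as hex when it is non-empty, even-length, lowercase hex, and `has_plain_values` becomes the exact inverse, so an odd-length hex-looking value such as "abc" now matches as plain text instead of falling through both branches.

```rust
// Sketch only; `is_even_lower_hex` and `classify_tag_values` are
// illustrative names, not the relay's real identifiers.
fn is_even_lower_hex(s: &str) -> bool {
    !s.is_empty()
        && s.len() % 2 == 0
        && s.bytes().all(|b| b.is_ascii_digit() || (b'a'..=b'f').contains(&b))
}

fn classify_tag_values(values: &[&str]) -> (bool, bool) {
    let has_hex_values = values.iter().any(|v| is_even_lower_hex(v));
    // The fix: plain is defined as the inverse of hex, so no value is missed.
    let has_plain_values = values.iter().any(|v| !is_even_lower_hex(v));
    (has_hex_values, has_plain_values)
}

fn main() {
    // "abc" is hex-looking but odd-length: now treated as a plain value.
    assert_eq!(classify_tag_values(&["abc"]), (false, true));
    assert_eq!(classify_tag_values(&["abcd"]), (true, false));
}
```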
Greg Heartsfield
39a3a258a0 refactor: clippy suggestions 2024-03-28 10:15:00 -05:00
Greg Heartsfield
44c6e3d88b improvement: upgrade dependencies
Updating aes v0.8.3 -> v0.8.4
Removing ahash v0.7.7
Removing ahash v0.8.6
Adding ahash v0.7.8
Adding ahash v0.8.11
Updating aho-corasick v1.1.2 -> v1.1.3
Updating anstream v0.6.4 -> v0.6.13
Updating anstyle v1.0.4 -> v1.0.6
Updating anstyle-parse v0.2.2 -> v0.2.3
Updating anstyle-query v1.0.0 -> v1.0.2
Updating anstyle-wincon v3.0.1 -> v3.0.2
Updating anyhow v1.0.75 -> v1.0.81
Updating async-channel v2.1.1 -> v2.2.0
Updating async-global-executor v2.4.0 -> v2.4.1
Updating async-io v2.2.1 -> v2.3.2
Updating async-lock v3.1.2 -> v3.3.0
Updating async-task v4.5.0 -> v4.7.0
Updating async-trait v0.1.74 -> v0.1.79
Updating autocfg v1.1.0 -> v1.2.0
Updating backtrace v0.3.69 -> v0.3.71
Updating base64 v0.21.5 -> v0.21.7
Updating bitflags v2.4.1 -> v2.5.0
Updating bumpalo v3.14.0 -> v3.15.4
Updating bytes v1.5.0 -> v1.6.0
Updating cc v1.0.83 -> v1.0.90
Updating chrono v0.4.31 -> v0.4.37
Updating clap v4.4.10 -> v4.5.4
Updating clap_builder v4.4.9 -> v4.5.2
Updating clap_derive v4.4.7 -> v4.5.4
Updating clap_lex v0.6.0 -> v0.7.0
Updating concurrent-queue v2.3.0 -> v2.4.0
Updating console v0.15.7 -> v0.15.8
Updating cpufeatures v0.2.11 -> v0.2.12
Updating crc32fast v1.3.2 -> v1.4.0
Updating crossbeam-channel v0.5.8 -> v0.5.12
Updating crossbeam-queue v0.3.8 -> v0.3.11
Updating crossbeam-utils v0.8.16 -> v0.8.19
Updating deranged v0.3.9 -> v0.3.11
Updating either v1.9.0 -> v1.10.0
Removing event-listener v4.0.0
Adding event-listener v4.0.3
Adding event-listener v5.2.0
Adding event-listener-strategy v0.5.0
Updating fastrand v2.0.1 -> v2.0.2
Updating futures v0.3.29 -> v0.3.30
Updating futures-channel v0.3.29 -> v0.3.30
Updating futures-core v0.3.29 -> v0.3.30
Updating futures-executor v0.3.29 -> v0.3.30
Updating futures-io v0.3.29 -> v0.3.30
Updating futures-lite v2.0.1 -> v2.3.0
Updating futures-macro v0.3.29 -> v0.3.30
Updating futures-sink v0.3.29 -> v0.3.30
Updating futures-task v0.3.29 -> v0.3.30
Updating futures-timer v3.0.2 -> v3.0.3
Updating futures-util v0.3.29 -> v0.3.30
Updating getrandom v0.2.11 -> v0.2.12
Updating h2 v0.3.22 -> v0.3.25
Adding heck v0.5.0
Updating hermit-abi v0.3.3 -> v0.3.9
Updating hkdf v0.12.3 -> v0.12.4
Updating home v0.5.5 -> v0.5.9
Updating http v0.2.11 -> v0.2.12
Updating http-body v0.4.5 -> v0.4.6
Updating hyper v0.14.27 -> v0.14.28
Updating iana-time-zone v0.1.58 -> v0.1.60
Updating indexmap v2.1.0 -> v2.2.6
Updating indicatif v0.17.7 -> v0.17.8
Updating itertools v0.11.0 -> v0.12.1
Updating itoa v1.0.9 -> v1.0.11
Updating js-sys v0.3.66 -> v0.3.69
Updating libc v0.2.150 -> v0.2.153
Updating linux-raw-sys v0.4.12 -> v0.4.13
Updating log v0.4.20 -> v0.4.21
Updating memchr v2.6.4 -> v2.7.2
Updating miniz_oxide v0.7.1 -> v0.7.2
Updating mio v0.8.9 -> v0.8.11
Adding num-conv v0.1.0
Updating num-integer v0.1.45 -> v0.1.46
Updating num-iter v0.1.43 -> v0.1.44
Updating num-traits v0.2.17 -> v0.2.18
Updating object v0.32.1 -> v0.32.2
Updating once_cell v1.18.0 -> v1.19.0
Updating pest v2.7.5 -> v2.7.8
Updating pest_derive v2.7.5 -> v2.7.8
Updating pest_generator v2.7.5 -> v2.7.8
Updating pest_meta v2.7.5 -> v2.7.8
Updating pin-project v1.1.3 -> v1.1.5
Updating pin-project-internal v1.1.3 -> v1.1.5
Updating pkg-config v0.3.27 -> v0.3.30
Updating polling v3.3.1 -> v3.6.0
Updating portable-atomic v1.5.1 -> v1.6.0
Updating proc-macro2 v1.0.70 -> v1.0.79
Updating quote v1.0.33 -> v1.0.35
Updating regex v1.10.2 -> v1.10.4
Updating regex-automata v0.4.3 -> v0.4.6
Updating regex-syntax v0.8.2 -> v0.8.3
Updating ring v0.17.6 -> v0.17.8
Updating rustix v0.38.26 -> v0.38.32
Updating rustls v0.21.9 -> v0.21.10
Updating ryu v1.0.15 -> v1.0.17
Updating schannel v0.1.22 -> v0.1.23
Updating serde v1.0.193 -> v1.0.197
Updating serde_derive v1.0.193 -> v1.0.197
Updating serde_json v1.0.108 -> v1.0.115
Updating smallvec v1.11.2 -> v1.13.2
Updating socket2 v0.5.5 -> v0.5.6
Updating sqlformat v0.2.2 -> v0.2.3
Updating strsim v0.10.0 -> v0.11.0
Updating syn v2.0.39 -> v2.0.55
Updating tempfile v3.8.1 -> v3.10.1
Updating thiserror v1.0.50 -> v1.0.58
Updating thiserror-impl v1.0.50 -> v1.0.58
Updating thread_local v1.1.7 -> v1.1.8
Updating time v0.3.30 -> v0.3.34
Updating time-macros v0.2.15 -> v0.2.17
Updating tokio v1.34.0 -> v1.36.0
Updating tokio-stream v0.1.14 -> v0.1.15
Updating try-lock v0.2.4 -> v0.2.5
Updating unicode-bidi v0.3.13 -> v0.3.15
Updating unicode-normalization v0.1.22 -> v0.1.23
Updating unicode-segmentation v1.10.1 -> v1.11.0
Updating uuid v1.6.1 -> v1.8.0
Updating value-bag v1.4.2 -> v1.8.1
Adding wasite v0.1.0
Updating wasm-bindgen v0.2.89 -> v0.2.92
Updating wasm-bindgen-backend v0.2.89 -> v0.2.92
Updating wasm-bindgen-futures v0.4.39 -> v0.4.42
Updating wasm-bindgen-macro v0.2.89 -> v0.2.92
Updating wasm-bindgen-macro-support v0.2.89 -> v0.2.92
Updating wasm-bindgen-shared v0.2.89 -> v0.2.92
Updating web-sys v0.3.66 -> v0.3.69
Updating whoami v1.4.1 -> v1.5.1
Updating windows-core v0.51.1 -> v0.52.0
Removing windows-sys v0.45.0
Removing windows-targets v0.42.2
Removing windows-targets v0.52.0
Adding windows-targets v0.52.4
Removing windows_aarch64_gnullvm v0.42.2
Removing windows_aarch64_gnullvm v0.52.0
Adding windows_aarch64_gnullvm v0.52.4
Removing windows_aarch64_msvc v0.42.2
Removing windows_aarch64_msvc v0.52.0
Adding windows_aarch64_msvc v0.52.4
Removing windows_i686_gnu v0.42.2
Removing windows_i686_gnu v0.52.0
Adding windows_i686_gnu v0.52.4
Removing windows_i686_msvc v0.42.2
Removing windows_i686_msvc v0.52.0
Adding windows_i686_msvc v0.52.4
Removing windows_x86_64_gnu v0.42.2
Removing windows_x86_64_gnu v0.52.0
Adding windows_x86_64_gnu v0.52.4
Removing windows_x86_64_gnullvm v0.42.2
Removing windows_x86_64_gnullvm v0.52.0
Adding windows_x86_64_gnullvm v0.52.4
Removing windows_x86_64_msvc v0.42.2
Removing windows_x86_64_msvc v0.52.0
Adding windows_x86_64_msvc v0.52.4
Updating zerocopy v0.7.28 -> v0.7.32
Updating zerocopy-derive v0.7.28 -> v0.7.32
2024-03-28 09:52:58 -05:00
Laszlo Megyer
767b76b2b3 fix: author filter in SQLite queries use correct blob type
https://todo.sr.ht/~gheartsfield/nostr-rs-relay/79
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2024-03-28 09:40:17 -05:00
Greg Heartsfield
c5fb16cd98 improvement: describe migration step that failed 2023-12-09 09:51:09 -06:00
Kieran
9c86f03902 refactor: drop hexrange
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-12-03 12:52:03 -06:00
Greg Heartsfield
971889f9a6 improvement: disable limit_scrapers by default
This is a good feature, but it can prevent valid requests from being
served.  Defaulting this to off will be less surprising to relay ops.
2023-12-03 10:51:59 -06:00
Kieran
388eadf880 feat: limit_scrapers
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-12-03 10:51:49 -06:00
Greg Heartsfield
1ce029860c docs: ensure latest Rust base image is pulled 2023-12-02 09:27:27 -06:00
Greg Heartsfield
b7e10e26a2 improvement: upgrade dependencies
Adding async-channel v2.1.1
Updating async-executor v1.6.0 -> v1.8.0
Updating async-global-executor v2.3.1 -> v2.4.0
Adding async-io v2.2.1
Adding async-lock v3.1.2
Updating blocking v1.4.1 -> v1.5.1
Updating clap v4.4.7 -> v4.4.10
Updating clap_builder v4.4.7 -> v4.4.9
Updating core-foundation v0.9.3 -> v0.9.4
Updating core-foundation-sys v0.8.4 -> v0.8.6
Updating errno v0.3.5 -> v0.3.8
Adding event-listener v4.0.0
Adding event-listener-strategy v0.4.0
Updating form_urlencoded v1.2.0 -> v1.2.1
Adding futures-lite v2.0.1
Updating getrandom v0.2.10 -> v0.2.11
Updating gimli v0.28.0 -> v0.28.1
Updating h2 v0.3.21 -> v0.3.22
Updating hashbrown v0.14.2 -> v0.14.3
Updating hdrhistogram v7.5.2 -> v7.5.4
Updating http v0.2.9 -> v0.2.11
Updating idna v0.4.0 -> v0.5.0
Updating js-sys v0.3.65 -> v0.3.66
Updating linux-raw-sys v0.4.10 -> v0.4.12
Updating percent-encoding v2.3.0 -> v2.3.1
Adding polling v3.3.1
Updating proc-macro2 v1.0.69 -> v1.0.70
Updating ring v0.17.5 -> v0.17.6
Updating rustix v0.38.21 -> v0.38.26
Updating rustls v0.21.8 -> v0.21.9
Updating rustls-pemfile v1.0.3 -> v1.0.4
Updating serde v1.0.190 -> v1.0.193
Updating serde_derive v1.0.190 -> v1.0.193
Updating smallvec v1.11.1 -> v1.11.2
Updating syn v2.0.38 -> v2.0.39
Updating tokio v1.33.0 -> v1.34.0
Updating tokio-macros v2.1.0 -> v2.2.0
Updating tracing-appender v0.2.2 -> v0.2.3
Updating tracing-log v0.1.4 -> v0.2.0
Updating tracing-subscriber v0.3.17 -> v0.3.18
Updating url v2.4.1 -> v2.5.0
Updating uuid v1.5.0 -> v1.6.1
Updating wasm-bindgen v0.2.88 -> v0.2.89
Updating wasm-bindgen-backend v0.2.88 -> v0.2.89
Updating wasm-bindgen-futures v0.4.38 -> v0.4.39
Updating wasm-bindgen-macro v0.2.88 -> v0.2.89
Updating wasm-bindgen-macro-support v0.2.88 -> v0.2.89
Updating wasm-bindgen-shared v0.2.88 -> v0.2.89
Updating web-sys v0.3.65 -> v0.3.66
Adding windows-sys v0.52.0
Adding windows-targets v0.52.0
Adding windows_aarch64_gnullvm v0.52.0
Adding windows_aarch64_msvc v0.52.0
Adding windows_i686_gnu v0.52.0
Adding windows_i686_msvc v0.52.0
Adding windows_x86_64_gnu v0.52.0
Adding windows_x86_64_gnullvm v0.52.0
Adding windows_x86_64_msvc v0.52.0
Updating zerocopy v0.7.25 -> v0.7.28
Updating zerocopy-derive v0.7.25 -> v0.7.28
2023-12-01 17:51:17 -06:00
Kieran
ab736f5f98 fix: abort query builder for empty arrays
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-12-01 17:22:14 -06:00
Kieran
b4471a6698 fix: multi-tag query
fixes https://github.com/scsibug/nostr-rs-relay/issues/102

Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-12-01 17:22:01 -06:00
Kieran
7120de4ff8 feat: restricted_writes
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-11-23 13:27:27 -06:00
thesimplekid
4ff77ab537 improvement: config to disable dm/invoice creation
Sending DMs to users who are not signed up but have attempted to
publish events is now disabled by default. This stops the creation
of extra invoices for pubkeys that may have no intention of signing
up for the relay, and also reduces the number of DMs that are
created.

Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-11-23 13:22:41 -06:00
Carsten Otto
84f60f0abc improvement(NIP-11): mention requirements for admin contact pubkey
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-11-23 13:14:45 -06:00
Greg Heartsfield
8a67770206 improvement: update dependencies
Removing ahash v0.4.7
Removing ahash v0.7.6
Removing ahash v0.8.3
Adding ahash v0.4.8
Adding ahash v0.7.7
Adding ahash v0.8.6
Updating aho-corasick v1.0.5 -> v1.1.2
Updating anstream v0.5.0 -> v0.6.4
Updating anstyle v1.0.2 -> v1.0.4
Updating anstyle-parse v0.2.1 -> v0.2.2
Updating anstyle-wincon v2.1.0 -> v3.0.1
Updating async-executor v1.5.1 -> v1.6.0
Updating async-task v4.4.0 -> v4.5.0
Updating async-trait v0.1.73 -> v0.1.74
Updating atomic-waker v1.1.1 -> v1.1.2
Updating base64 v0.21.3 -> v0.21.5
Updating bitflags v2.4.0 -> v2.4.1
Updating blocking v1.3.1 -> v1.4.1
Updating bumpalo v3.13.0 -> v3.14.0
Updating byteorder v1.4.3 -> v1.5.0
Updating bytes v1.4.0 -> v1.5.0
Updating chrono v0.4.28 -> v0.4.31
Updating clap v4.4.2 -> v4.4.7
Updating clap_builder v4.4.2 -> v4.4.7
Updating clap_derive v4.4.2 -> v4.4.7
Updating clap_lex v0.5.1 -> v0.6.0
Updating concurrent-queue v2.2.0 -> v2.3.0
Updating const_format v0.2.31 -> v0.2.32
Updating const_format_proc_macros v0.2.31 -> v0.2.32
Updating cpufeatures v0.2.9 -> v0.2.11
Updating crc-catalog v2.2.0 -> v2.4.0
Updating deranged v0.3.8 -> v0.3.9
Removing dirs v5.0.1
Removing dirs-sys v0.4.1
Updating errno v0.3.3 -> v0.3.5
Removing errno-dragonfly v0.1.2
Updating fastrand v2.0.0 -> v2.0.1
Adding finl_unicode v1.2.0
Updating flate2 v1.0.27 -> v1.0.28
Updating futures v0.3.28 -> v0.3.29
Updating futures-channel v0.3.28 -> v0.3.29
Updating futures-core v0.3.28 -> v0.3.29
Updating futures-executor v0.3.28 -> v0.3.29
Updating futures-io v0.3.28 -> v0.3.29
Updating futures-macro v0.3.28 -> v0.3.29
Updating futures-sink v0.3.28 -> v0.3.29
Updating futures-task v0.3.28 -> v0.3.29
Updating futures-util v0.3.28 -> v0.3.29
Updating hashbrown v0.14.0 -> v0.14.2
Updating hermit-abi v0.3.2 -> v0.3.3
Adding home v0.5.5
Updating hyper-rustls v0.24.1 -> v0.24.2
Updating iana-time-zone v0.1.57 -> v0.1.58
Updating indexmap v2.0.0 -> v2.1.0
Updating indicatif v0.17.6 -> v0.17.7
Updating js-sys v0.3.64 -> v0.3.65
Updating libc v0.2.147 -> v0.2.150
Adding libredox v0.0.1
Updating linux-raw-sys v0.4.5 -> v0.4.10
Updating lock_api v0.4.10 -> v0.4.11
Updating matchit v0.7.2 -> v0.7.3
Updating md-5 v0.10.5 -> v0.10.6
Updating memchr v2.6.3 -> v2.6.4
Updating mio v0.8.8 -> v0.8.9
Updating num-traits v0.2.16 -> v0.2.17
Removing option-ext v0.2.0
Updating parking v2.1.0 -> v2.2.0
Updating parking_lot_core v0.9.8 -> v0.9.9
Updating pest v2.7.3 -> v2.7.5
Updating pest_derive v2.7.3 -> v2.7.5
Updating pest_generator v2.7.3 -> v2.7.5
Updating pest_meta v2.7.3 -> v2.7.5
Adding piper v0.2.1
Updating portable-atomic v1.4.3 -> v1.5.1
Adding powerfmt v0.2.0
Updating proc-macro2 v1.0.66 -> v1.0.69
Updating redox_syscall v0.3.5 -> v0.4.1
Updating redox_users v0.4.3 -> v0.4.4
Updating regex v1.9.5 -> v1.10.2
Updating regex-automata v0.3.8 -> v0.4.3
Updating regex-syntax v0.7.5 -> v0.8.2
Adding ring v0.17.5
Removing rustix v0.37.23
Removing rustix v0.38.11
Adding rustix v0.37.27
Adding rustix v0.38.21
Updating rustls v0.21.7 -> v0.21.8
Updating rustls-webpki v0.101.4 -> v0.101.7
Updating sct v0.7.0 -> v0.7.1
Updating serde v1.0.188 -> v1.0.190
Updating serde_derive v1.0.188 -> v1.0.190
Updating serde_json v1.0.105 -> v1.0.108
Updating sha1 v0.10.5 -> v0.10.6
Updating sha2 v0.10.7 -> v0.10.8
Updating sharded-slab v0.1.4 -> v0.1.7
Updating smallvec v1.11.0 -> v1.11.1
Removing socket2 v0.4.9
Removing socket2 v0.5.3
Adding socket2 v0.4.10
Adding socket2 v0.5.5
Adding spin v0.9.8
Updating stringprep v0.1.3 -> v0.1.4
Updating syn v2.0.31 -> v2.0.38
Updating tempfile v3.8.0 -> v3.8.1
Updating thiserror v1.0.48 -> v1.0.50
Updating thiserror-impl v1.0.48 -> v1.0.50
Removing time v0.1.43
Removing time v0.3.28
Adding time v0.3.30
Updating time-core v0.1.1 -> v0.1.2
Updating time-macros v0.2.14 -> v0.2.15
Updating tokio v1.32.0 -> v1.33.0
Updating tokio-util v0.7.8 -> v0.7.10
Updating tracing v0.1.37 -> v0.1.40
Updating tracing-attributes v0.1.26 -> v0.1.27
Updating tracing-core v0.1.31 -> v0.1.32
Updating tracing-log v0.1.3 -> v0.1.4
Updating typenum v1.16.0 -> v1.17.0
Updating unicode-ident v1.0.11 -> v1.0.12
Updating unicode-width v0.1.10 -> v0.1.11
Adding untrusted v0.9.0
Updating uuid v1.4.1 -> v1.5.0
Updating value-bag v1.4.1 -> v1.4.2
Updating waker-fn v1.1.0 -> v1.1.1
Updating wasm-bindgen v0.2.87 -> v0.2.88
Updating wasm-bindgen-backend v0.2.87 -> v0.2.88
Updating wasm-bindgen-futures v0.4.37 -> v0.4.38
Updating wasm-bindgen-macro v0.2.87 -> v0.2.88
Updating wasm-bindgen-macro-support v0.2.87 -> v0.2.88
Updating wasm-bindgen-shared v0.2.87 -> v0.2.88
Updating web-sys v0.3.64 -> v0.3.65
Updating webpki v0.22.1 -> v0.22.4
Updating which v4.4.1 -> v4.4.2
Removing windows v0.48.0
Adding windows-core v0.51.1
Adding zerocopy v0.7.25
Adding zerocopy-derive v0.7.25
2023-11-05 09:54:07 -06:00
thesimplekid
7650f5f4a3 fix: relay fee in msats
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-11-03 18:30:01 -05:00
Yuki Kishimoto
a7b169c0d3 fix: send OK message for ephemeral events
4b9f13d983/01.md (L153)
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-11-03 18:19:25 -05:00
Kieran
24b1705a08 fix: value_hex tag query
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-11-03 18:17:05 -05:00
benthecarman
9d0a98f8bf docs: add line for enabling systemd service
Signed-off-by: Greg Heartsfield <scsibug@imap.cc>
2023-11-03 18:15:25 -05:00
Greg Heartsfield
26f296f76f build: bump version to 0.8.13 2023-09-04 10:03:16 -05:00
Greg Heartsfield
c3c9b5dcd2 improvement: remove openssl dependency 2023-09-04 07:53:58 -05:00
Greg Heartsfield
da29bdd837 test: fix broken connection tests 2023-09-04 07:24:41 -05:00
Greg Heartsfield
bacb85024c improvement: update dependencies
Updating addr2line v0.20.0 -> v0.21.0
Updating aho-corasick v1.0.2 -> v1.0.5
Updating anstream v0.3.2 -> v0.5.0
Updating anstyle v1.0.1 -> v1.0.2
Updating anstyle-wincon v1.0.2 -> v2.1.0
Updating anyhow v1.0.72 -> v1.0.75
Updating async-lock v2.7.0 -> v2.8.0
Updating async-trait v0.1.72 -> v0.1.73
Updating backtrace v0.3.68 -> v0.3.69
Updating base64 v0.21.2 -> v0.21.3
Updating bitflags v2.3.3 -> v2.4.0
Updating cc v1.0.82 -> v1.0.83
Updating chrono v0.4.26 -> v0.4.28
Updating clap v4.3.21 -> v4.4.2
Updating clap_builder v4.3.21 -> v4.4.2
Updating clap_derive v4.3.12 -> v4.4.2
Updating clap_lex v0.5.0 -> v0.5.1
Updating dashmap v5.5.0 -> v5.5.3
Updating deranged v0.3.7 -> v0.3.8
Adding dirs v5.0.1
Adding dirs-sys v0.4.1
Updating errno v0.3.2 -> v0.3.3
Updating flate2 v1.0.26 -> v1.0.27
Updating gimli v0.27.3 -> v0.28.0
Updating h2 v0.3.20 -> v0.3.21
Updating hashlink v0.8.3 -> v0.8.4
Updating httpdate v1.0.2 -> v1.0.3
Removing is-terminal v0.4.9
Adding itertools v0.11.0
Updating log v0.4.19 -> v0.4.20
Updating memchr v2.5.0 -> v2.6.3
Updating object v0.31.1 -> v0.32.1
Updating openssl v0.10.56 -> v0.10.57
Updating openssl-sys v0.9.91 -> v0.9.92
Adding option-ext v0.2.0
Updating pest v2.7.2 -> v2.7.3
Updating pest_derive v2.7.2 -> v2.7.3
Updating pest_generator v2.7.2 -> v2.7.3
Updating pest_meta v2.7.2 -> v2.7.3
Updating petgraph v0.6.3 -> v0.6.4
Updating pin-project-lite v0.2.12 -> v0.2.13
Updating portable-atomic v1.4.2 -> v1.4.3
Updating quote v1.0.32 -> v1.0.33
Updating regex v1.9.3 -> v1.9.5
Updating regex-automata v0.3.6 -> v0.3.8
Updating regex-syntax v0.7.4 -> v0.7.5
Updating rustix v0.38.7 -> v0.38.11
Updating rustls v0.20.8 -> v0.20.9
Updating serde v1.0.183 -> v1.0.188
Updating serde_derive v1.0.183 -> v1.0.188
Updating serde_json v1.0.104 -> v1.0.105
Updating slab v0.4.8 -> v0.4.9
Updating sqlformat v0.2.1 -> v0.2.2
Updating syn v2.0.28 -> v2.0.31
Updating tempfile v3.7.1 -> v3.8.0
Updating thiserror v1.0.44 -> v1.0.48
Updating thiserror-impl v1.0.44 -> v1.0.48
Removing time v0.1.45
Removing time v0.3.25
Adding time v0.1.43
Adding time v0.3.28
Updating time-macros v0.2.11 -> v0.2.14
Updating tokio v1.30.0 -> v1.32.0
Updating url v2.4.0 -> v2.4.1
Updating wasi v0.10.0+wasi-snapshot-preview1 -> v0.10.2+wasi-snapshot-preview1
Updating webpki v0.22.0 -> v0.22.1
Updating which v4.4.0 -> v4.4.1
Updating windows-targets v0.48.1 -> v0.48.5
Updating windows_aarch64_gnullvm v0.48.0 -> v0.48.5
Updating windows_aarch64_msvc v0.48.0 -> v0.48.5
Updating windows_i686_gnu v0.48.0 -> v0.48.5
Updating windows_i686_msvc v0.48.0 -> v0.48.5
Updating windows_x86_64_gnu v0.48.0 -> v0.48.5
Updating windows_x86_64_gnullvm v0.48.0 -> v0.48.5
Updating windows_x86_64_msvc v0.48.0 -> v0.48.5
2023-09-04 06:52:02 -05:00
Wspsxing
7a77c459bb fix: panic on malformed signature 2023-09-04 06:48:26 -05:00
Greg Heartsfield
34c8b04926 improvement: update dependencies
Updating crates.io index
Updating anstyle-wincon v1.0.1 -> v1.0.2
Updating cc v1.0.81 -> v1.0.82
Updating clap v4.3.19 -> v4.3.21
Updating clap_builder v4.3.19 -> v4.3.21
Updating openssl v0.10.55 -> v0.10.56
Updating openssl-sys v0.9.90 -> v0.9.91
Updating pin-project v1.1.2 -> v1.1.3
Updating pin-project-internal v1.1.2 -> v1.1.3
Updating pin-project-lite v0.2.10 -> v0.2.12
Updating regex v1.9.1 -> v1.9.3
Updating regex-automata v0.3.4 -> v0.3.6
Updating rustix v0.38.6 -> v0.38.7
Updating serde v1.0.181 -> v1.0.183
Updating serde_derive v1.0.181 -> v1.0.183
Adding socket2 v0.5.3
Updating tempfile v3.7.0 -> v3.7.1
Updating tokio v1.29.1 -> v1.30.0
2023-08-09 15:00:34 -07:00
Greg Heartsfield
1032a51220 refactor: clippy suggestions 2023-08-09 14:59:39 -07:00
Václav Navrátil
79abd981e1 fix: build gRPC server code
This will allow the gRPC example to compile.

Fix for https://github.com/scsibug/nostr-rs-relay/issues/141
2023-08-09 13:24:52 -07:00
rorp
b1957ab2b1 feat(NIP-42): extend authz to NIP-44 DMs and NIP-59 gift wraps 2023-08-09 13:11:03 -07:00
Greg Heartsfield
23aa6e7313 docs: sqlite in-memory mode is false by default 2023-08-09 13:09:11 -07:00
Greg Heartsfield
fb751ba252 build: bump version to 0.8.12 2023-08-05 15:33:14 -05:00
Greg Heartsfield
7c5e851b82 fix: reset in-memory config to be false 2023-08-05 15:29:16 -05:00
Greg Heartsfield
f965c53434 build: bump version to 0.8.11 2023-08-05 11:42:14 -05:00
Greg Heartsfield
74376d94e5 improvement: upgrade multiple dependencies
Adding addr2line v0.20.0
Updating aes v0.8.2 -> v0.8.3
Adding ahash v0.8.3
Updating aho-corasick v0.7.20 -> v1.0.2
Adding allocator-api2 v0.2.16
Adding android-tzdata v0.1.1
Adding anstream v0.3.2
Adding anstyle v1.0.1
Adding anstyle-parse v0.2.1
Adding anstyle-query v1.0.0
Adding anstyle-wincon v1.0.1
Updating anyhow v1.0.69 -> v1.0.72
Updating async-channel v1.8.0 -> v1.9.0
Updating async-executor v1.5.0 -> v1.5.1
Updating async-io v1.12.0 -> v1.13.0
Updating async-lock v2.6.0 -> v2.7.0
Updating async-stream v0.3.3 -> v0.3.5
Updating async-stream-impl v0.3.3 -> v0.3.5
Updating async-task v4.3.0 -> v4.4.0
Updating async-trait v0.1.64 -> v0.1.72
Updating atomic-waker v1.1.0 -> v1.1.1
Updating axum v0.6.6 -> v0.6.20
Updating axum-core v0.3.2 -> v0.3.4
Adding backtrace v0.3.68
Updating base64 v0.21.0 -> v0.21.2
Adding bitflags v2.3.3
Updating block-buffer v0.10.3 -> v0.10.4
Updating block-padding v0.3.2 -> v0.3.3
Updating blocking v1.3.0 -> v1.3.1
Updating bumpalo v3.12.0 -> v3.13.0
Updating cc v1.0.79 -> v1.0.81
Updating chrono v0.4.23 -> v0.4.26
Updating cipher v0.4.3 -> v0.4.4
Updating clap v4.1.4 -> v4.3.19
Adding clap_builder v4.3.19
Updating clap_derive v4.1.0 -> v4.3.12
Updating clap_lex v0.3.1 -> v0.5.0
Removing codespan-reporting v0.11.1
Adding colorchoice v1.0.0
Updating concurrent-queue v2.1.0 -> v2.2.0
Updating console v0.15.5 -> v0.15.7
Updating console-api v0.4.0 -> v0.5.0
Updating console-subscriber v0.1.8 -> v0.1.10
Updating const_format v0.2.30 -> v0.2.31
Updating const_format_proc_macros v0.2.29 -> v0.2.31
Updating core-foundation-sys v0.8.3 -> v0.8.4
Updating cpufeatures v0.2.5 -> v0.2.9
Updating crossbeam-channel v0.5.6 -> v0.5.8
Updating crossbeam-utils v0.8.14 -> v0.8.16
Removing ctor v0.1.26
Removing cxx v1.0.90
Removing cxx-build v1.0.90
Removing cxxbridge-flags v1.0.90
Removing cxxbridge-macro v1.0.90
Updating dashmap v5.4.0 -> v5.5.0
Adding deranged v0.3.7
Updating digest v0.10.6 -> v0.10.7
Updating dotenvy v0.15.6 -> v0.15.7
Updating either v1.8.1 -> v1.9.0
Adding equivalent v1.0.1
Updating errno v0.2.8 -> v0.3.2
Removing fastrand v1.8.0
Adding fastrand v1.9.0
Adding fastrand v2.0.0
Updating flate2 v1.0.25 -> v1.0.26
Updating form_urlencoded v1.1.0 -> v1.2.0
Updating futures v0.3.26 -> v0.3.28
Updating futures-channel v0.3.26 -> v0.3.28
Updating futures-core v0.3.26 -> v0.3.28
Updating futures-executor v0.3.26 -> v0.3.28
Updating futures-io v0.3.26 -> v0.3.28
Updating futures-lite v1.12.0 -> v1.13.0
Updating futures-macro v0.3.26 -> v0.3.28
Updating futures-sink v0.3.26 -> v0.3.28
Updating futures-task v0.3.26 -> v0.3.28
Updating futures-util v0.3.26 -> v0.3.28
Updating generic-array v0.14.6 -> v0.14.7
Updating getrandom v0.2.8 -> v0.2.10
Adding gimli v0.27.3
Updating h2 v0.3.15 -> v0.3.20
Adding hashbrown v0.14.0
Updating hashlink v0.8.1 -> v0.8.3
Removing hermit-abi v0.2.6
Removing hermit-abi v0.3.1
Adding hermit-abi v0.3.2
Updating http v0.2.8 -> v0.2.9
Removing http-range-header v0.3.0
Updating hyper v0.14.24 -> v0.14.27
Updating iana-time-zone v0.1.53 -> v0.1.57
Updating iana-time-zone-haiku v0.1.1 -> v0.1.2
Updating idna v0.3.0 -> v0.4.0
Removing indexmap v1.9.2
Adding indexmap v1.9.3
Adding indexmap v2.0.0
Updating indicatif v0.17.3 -> v0.17.6
Updating io-lifetimes v1.0.5 -> v1.0.11
Updating is-terminal v0.4.3 -> v0.4.9
Updating itoa v1.0.5 -> v1.0.9
Updating js-sys v0.3.61 -> v0.3.64
Updating libc v0.2.139 -> v0.2.147
Removing link-cplusplus v1.0.8
Removing linux-raw-sys v0.1.4
Adding linux-raw-sys v0.3.8
Adding linux-raw-sys v0.4.5
Updating lock_api v0.4.9 -> v0.4.10
Updating log v0.4.17 -> v0.4.19
Updating matchit v0.7.0 -> v0.7.2
Updating mime v0.3.16 -> v0.3.17
Updating miniz_oxide v0.6.2 -> v0.7.1
Updating mio v0.8.5 -> v0.8.8
Updating nostr v0.18.0 -> v0.18.1
Updating nostr-rs-relay v0.8.9 -> v0.8.10
Updating num-traits v0.2.15 -> v0.2.16
Updating num_cpus v1.15.0 -> v1.16.0
Adding object v0.31.1
Updating once_cell v1.17.0 -> v1.18.0
Updating openssl v0.10.45 -> v0.10.55
Updating openssl-macros v0.1.0 -> v0.1.1
Updating openssl-sys v0.9.80 -> v0.9.90
Removing os_str_bytes v6.4.1
Updating parking v2.0.0 -> v2.1.0
Updating parking_lot_core v0.9.7 -> v0.9.8
Updating paste v1.0.11 -> v1.0.14
Updating percent-encoding v2.2.0 -> v2.3.0
Updating pest v2.5.5 -> v2.7.2
Updating pest_derive v2.5.5 -> v2.7.2
Updating pest_generator v2.5.5 -> v2.7.2
Updating pest_meta v2.5.5 -> v2.7.2
Updating pin-project v1.0.12 -> v1.1.2
Updating pin-project-internal v1.0.12 -> v1.1.2
Updating pin-project-lite v0.2.9 -> v0.2.10
Updating pkg-config v0.3.26 -> v0.3.27
Updating polling v2.5.2 -> v2.8.0
Updating portable-atomic v0.3.19 -> v1.4.2
Updating prettyplease v0.1.23 -> v0.1.25
Removing proc-macro-error v1.0.4
Removing proc-macro-error-attr v1.0.4
Updating proc-macro2 v1.0.51 -> v1.0.66
Updating prost v0.11.6 -> v0.11.9
Updating prost-build v0.11.6 -> v0.11.9
Updating prost-derive v0.11.6 -> v0.11.9
Updating prost-types v0.11.6 -> v0.11.9
Updating quote v1.0.23 -> v1.0.32
Updating raw-cpuid v10.6.1 -> v10.7.0
Adding redox_syscall v0.3.5
Updating regex v1.7.1 -> v1.9.1
Adding regex-automata v0.3.4
Removing regex-syntax v0.6.28
Adding regex-syntax v0.6.29
Adding regex-syntax v0.7.4
Removing remove_dir_all v0.5.3
Adding rustc-demangle v0.1.23
Removing rustix v0.36.8
Adding rustix v0.37.23
Adding rustix v0.38.6
Updating rustls-pemfile v1.0.2 -> v1.0.3
Updating rustversion v1.0.11 -> v1.0.14
Updating ryu v1.0.12 -> v1.0.15
Updating schannel v0.1.21 -> v0.1.22
Updating scheduled-thread-pool v0.2.6 -> v0.2.7
Updating scopeguard v1.1.0 -> v1.2.0
Removing scratch v1.0.3
Updating security-framework v2.8.2 -> v2.9.2
Updating security-framework-sys v2.8.0 -> v2.9.1
Updating serde v1.0.152 -> v1.0.181
Updating serde_derive v1.0.152 -> v1.0.181
Updating serde_json v1.0.93 -> v1.0.104
Updating sha2 v0.10.6 -> v0.10.7
Updating slab v0.4.7 -> v0.4.8
Updating smallvec v1.10.0 -> v1.11.0
Updating socket2 v0.4.7 -> v0.4.9
Updating sqlx v0.6.2 -> v0.6.3
Updating sqlx-core v0.6.2 -> v0.6.3
Updating sqlx-macros v0.6.2 -> v0.6.3
Updating sqlx-rt v0.6.2 -> v0.6.3
Updating stringprep v0.1.2 -> v0.1.3
Updating subtle v2.4.1 -> v2.5.0
Removing syn v1.0.107
Adding syn v1.0.109
Adding syn v2.0.28
Updating tempfile v3.3.0 -> v3.7.0
Removing termcolor v1.2.0
Updating thiserror v1.0.38 -> v1.0.44
Updating thiserror-impl v1.0.38 -> v1.0.44
Updating tikv-jemalloc-sys v0.5.3+5.3.0-patched -> v0.5.4+5.3.0-patched
Updating tikv-jemallocator v0.5.0 -> v0.5.4
Updating time v0.3.20 -> v0.3.25
Updating time-core v0.1.0 -> v0.1.1
Updating time-macros v0.2.8 -> v0.2.11
Updating tokio v1.25.0 -> v1.29.1
Updating tokio-macros v1.8.2 -> v2.1.0
Updating tokio-stream v0.1.11 -> v0.1.14
Updating tokio-util v0.7.7 -> v0.7.8
Adding tonic v0.9.2
Removing tower-http v0.3.5
Updating tracing-attributes v0.1.23 -> v0.1.26
Updating tracing-core v0.1.30 -> v0.1.31
Updating tracing-subscriber v0.3.16 -> v0.3.17
Updating ucd-trie v0.1.5 -> v0.1.6
Updating unicode-bidi v0.3.10 -> v0.3.13
Updating unicode-ident v1.0.6 -> v1.0.11
Updating url v2.3.1 -> v2.4.0
Adding utf8parse v0.2.1
Updating uuid v1.3.0 -> v1.4.1
Updating value-bag v1.0.0-alpha.9 -> v1.4.1
Updating want v0.3.0 -> v0.3.1
Updating wasm-bindgen v0.2.84 -> v0.2.87
Updating wasm-bindgen-backend v0.2.84 -> v0.2.87
Updating wasm-bindgen-futures v0.4.34 -> v0.4.37
Updating wasm-bindgen-macro v0.2.84 -> v0.2.87
Updating wasm-bindgen-macro-support v0.2.84 -> v0.2.87
Updating wasm-bindgen-shared v0.2.84 -> v0.2.87
Updating web-sys v0.3.61 -> v0.3.64
Removing wepoll-ffi v0.1.2
Updating whoami v1.3.0 -> v1.4.1
Removing winapi-util v0.1.5
Adding windows v0.48.0
Updating windows-sys v0.42.0 -> v0.48.0
Removing windows-targets v0.42.1
Adding windows-targets v0.42.2
Adding windows-targets v0.48.1
Removing windows_aarch64_gnullvm v0.42.1
Adding windows_aarch64_gnullvm v0.42.2
Adding windows_aarch64_gnullvm v0.48.0
Removing windows_aarch64_msvc v0.42.1
Adding windows_aarch64_msvc v0.42.2
Adding windows_aarch64_msvc v0.48.0
Removing windows_i686_gnu v0.42.1
Adding windows_i686_gnu v0.42.2
Adding windows_i686_gnu v0.48.0
Removing windows_i686_msvc v0.42.1
Adding windows_i686_msvc v0.42.2
Adding windows_i686_msvc v0.48.0
Removing windows_x86_64_gnu v0.42.1
Adding windows_x86_64_gnu v0.42.2
Adding windows_x86_64_gnu v0.48.0
Removing windows_x86_64_gnullvm v0.42.1
Adding windows_x86_64_gnullvm v0.42.2
Adding windows_x86_64_gnullvm v0.48.0
Removing windows_x86_64_msvc v0.42.1
Adding windows_x86_64_msvc v0.42.2
Adding windows_x86_64_msvc v0.48.0
2023-08-05 11:37:23 -05:00
Greg Heartsfield
21d1bbcfe3 build: bump version to 0.8.10 2023-08-05 11:18:12 -05:00
Greg Heartsfield
c3e13af9e3 test: wip integration test for event publishing 2023-08-05 11:16:11 -05:00
Greg Heartsfield
05f70112e8 improvement: reduce logging for hex parse failures in events 2023-08-05 07:13:53 -05:00
Greg Heartsfield
eab522dc39 feat: warn or exit on config file parse errors
The relay will now fail to start if an invalid config file is
explicitly provided.  If the file was read implicitly from the current
directory, a warning will be provided, but the relay will still start up.
2023-07-29 08:33:27 -05:00
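A rough sketch of that behavior under stated assumptions (the stub `parse` stands in for the real TOML deserialization, and all names here are hypothetical):

```rust
use std::process::exit;

#[derive(Default)]
struct Settings;

// Stand-in for the real TOML parsing step.
fn parse(_raw: &str) -> Result<Settings, String> {
    Err("example parse failure".into())
}

// An explicitly provided config that fails to parse is fatal; an
// implicit ./config.toml only produces a warning.
fn load(path: &str, explicitly_provided: bool) -> Settings {
    let parsed = std::fs::read_to_string(path)
        .map_err(|e| e.to_string())
        .and_then(|raw| parse(&raw));
    match parsed {
        Ok(settings) => settings,
        Err(e) if explicitly_provided => {
            eprintln!("config error in {path}: {e}");
            exit(1);
        }
        Err(e) => {
            eprintln!("warning: {path}: {e}; continuing with defaults");
            Settings::default()
        }
    }
}

fn main() {
    let _settings = load("config.toml", false);
}
```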
Iru Sensei
edf7af1573 feat: verify config file exists and can be read 2023-07-29 08:32:55 -05:00
Václav Navrátil
34f497a650 docs: example SQL to delete old events
Added SQL Query example to delete events older than 30 days.
2023-07-29 06:45:17 -05:00
Greg Heartsfield
4adad4c3a9 fix: update since/until semantics for subscriptions 2023-07-16 11:42:55 -05:00
Václav Navrátil
70dfcb6a04 feat(NIP-11): relay_icon option added 2023-07-16 11:42:41 -05:00
jiftechnify
c50e10aa21 fix: keep up with the latest specs for since/until filter 2023-07-15 11:12:38 -05:00
Greg Heartsfield
9e22776227 refactor: whitespace 2023-07-03 10:35:51 -05:00
Greg Heartsfield
dad6911807 refactor: clippy suggestions 2023-07-03 10:31:22 -05:00
thesimplekid
ddc58a2f1c feat: config sending dms on pay to relay signup 2023-07-03 09:51:28 -05:00
thesimplekid
1131c1986e fix: lnbits expired invoice for existing user 2023-07-03 09:51:07 -05:00
thesimplekid
06fcaad9a1 chore: typos 2023-07-03 09:49:40 -05:00
Greg Heartsfield
087b68128f fix: ensure startup SQL runs, even with zero min writers 2023-06-23 10:38:06 -05:00
Greg Heartsfield
4647476622 improvement: default to logging on stdout 2023-06-23 10:34:25 -05:00
Greg Heartsfield
7a72e588ea refactor: reorder imports 2023-06-23 10:03:08 -05:00
Jamin M
9237eed735 feat: roll over logs daily 2023-06-23 10:03:01 -05:00
Jamin M
f4beb884b3 feat: allow logging output to file 2023-06-23 10:02:49 -05:00
Yuval Adam
73285683a3 docs: add database maintenance example queries 2023-06-23 09:55:05 -05:00
rorp
2f10271903 improvement(NIP-42): use 'restricted:' prefix for auth error msgs 2023-06-23 09:52:50 -05:00
thesimplekid
a34516628b docs: typo in build-essential package name 2023-06-23 09:48:43 -05:00
Greg Heartsfield
eba7a32615 perf: reduce SQLite connection count and idle lifetime
On lightly loaded relays, we free up memory faster by letting idle
connections be reclaimed in 10 seconds instead of the default 10
minutes.  This also sets the minimum to zero connections, instead of
always trying to hold one open.
2023-05-07 19:38:18 -05:00
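A hedged sketch of what such pool settings look like with `r2d2`/`r2d2_sqlite` (assuming an r2d2-style pool; the database path and `max_size` value are placeholders, not the relay's exact code):

```rust
use std::time::Duration;
use r2d2_sqlite::SqliteConnectionManager;

fn build_reader_pool() -> r2d2::Pool<SqliteConnectionManager> {
    let manager = SqliteConnectionManager::file("nostr.db");
    r2d2::Pool::builder()
        // Hold zero idle connections instead of always keeping one open.
        .min_idle(Some(0))
        // Reclaim idle connections after 10 seconds, not the 10-minute default.
        .idle_timeout(Some(Duration::from_secs(10)))
        .max_size(8)
        .build(manager)
        .expect("failed to build reader pool")
}

fn main() {
    let _pool = build_reader_pool();
}
```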
Greg Heartsfield
4d746fad85 docs: helpful ubuntu packages for building 2023-05-07 19:33:10 -05:00
Greg Heartsfield
0582a891cc perf: switch to jemalloc allocator 2023-05-07 19:32:50 -05:00
Greg Heartsfield
2bcddf8bbf perf: disable sqlite mmap to reduce memory pressure 2023-05-06 15:40:56 -05:00
Greg Heartsfield
1595ec783d docs: allow host header prefix matching, required for Damus compatibility 2023-05-06 14:43:30 -05:00
Greg Heartsfield
a2d1d78e23 docs: reformatting 2023-05-06 14:42:59 -05:00
Greg Heartsfield
04db2203bb perf: use standard allocator, limit sqlite mmap to 4GB
This is an experimental change to see if we can reduce memory usage
with large SQLite databases.  If successful, we'll do this again and
further reduce the database mmap size.

This will cause greater use of the page cache, but that is more easily
reclaimed by the kernel, and should reduce memory pressure, as well as
making it clearer how much memory the application is actually using
for connections, subscriptions, etc.
2023-05-03 07:22:44 -05:00
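The cap itself is a single SQLite pragma applied per connection; a minimal sketch with `rusqlite` under that assumption (4GB = 4294967296 bytes):

```rust
use rusqlite::Connection;

fn open_with_capped_mmap(path: &str) -> rusqlite::Result<Connection> {
    let conn = Connection::open(path)?;
    // Limit memory-mapped I/O to 4GB; reads beyond that go through the
    // kernel page cache, which is reclaimed more easily under pressure.
    conn.execute_batch("PRAGMA mmap_size = 4294967296;")?;
    Ok(conn)
}

fn main() -> rusqlite::Result<()> {
    let _conn = open_with_capped_mmap("nostr.db")?;
    Ok(())
}
```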
Greg Heartsfield
1c1b1a1802 build: upgrade checkout action for github ci 2023-04-30 11:13:03 -05:00
Greg Heartsfield
993fec4eed improvement: document pg connection_write config 2023-04-30 10:10:06 -05:00
Kieran
beffeb4d86 improvement: add a configurable postgres write conn string
This adds a new configurable connection string for postgres writes.
2023-04-30 10:02:10 -05:00
Petr Kracik
5135f3b007 improvement: use appropriate paths for systemd example 2023-04-30 09:55:07 -05:00
Greg Heartsfield
ba0b50bc9c build: bump version to 0.8.9 2023-04-22 13:47:08 -05:00
0xtr
c65c64275e docs: add systemd service file and guide 2023-04-19 18:37:16 -05:00
Greg Heartsfield
80c459c36c improvement: switch to jemalloc allocator 2023-04-06 18:33:30 -05:00
rorp
8e4e2d824b feat(NIP-42): limit access to kind 4 DMs 2023-03-03 09:04:35 -06:00
thesimplekid
c13961a5c4 fix: nip05 for postgres 2023-03-03 08:57:23 -06:00
thesimplekid
05b08c7916 feat: join via nip-07 2023-03-01 18:04:06 -06:00
Greg Heartsfield
9a141dc950 improvement: disable HTTP request logging 2023-02-25 15:57:01 -06:00
Greg Heartsfield
8c9170d4e3 fix: persist database version for v18 migration 2023-02-25 15:55:00 -06:00
Greg Heartsfield
5508020777 improvement: configure pay-to-relay defaults and comment block 2023-02-25 15:53:32 -06:00
Greg Heartsfield
43021910ea improvement: disable pay-to-relay by default 2023-02-25 15:41:30 -06:00
thesimplekid
c0158af18b feat(NIP-111): pay to relay (experimental) 2023-02-25 15:38:26 -06:00
Rene Honig
164603dedd docs: add Traefik to reverse proxy doc 2023-02-25 14:50:58 -06:00
Greg Heartsfield
c1c25a22f5 refactor: format 2023-02-25 14:49:35 -06:00
thesimplekid
6df92f9580 refactor: format
cargo fmt
2023-02-25 14:46:49 -06:00
Greg Heartsfield
440217e1ee docs: add documented support for NIP-40 2023-02-25 14:29:52 -06:00
Greg Heartsfield
96359aafab docs: better example of kinds for allowlist 2023-02-25 14:05:11 -06:00
Mike White
5414629298 feat: add event kind allowlist 2023-02-25 14:00:01 -06:00
Greg Heartsfield
2be75e18fb build: bump version to 0.8.8 2023-02-21 08:16:40 -06:00
Greg Heartsfield
5f6ff4c2b7 fix: in-memory SQLite DB correctly shares memory between connections
fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/73#event-227131
2023-02-21 08:14:19 -06:00
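For context, SQLite only shares an in-memory database across connections when it is opened as a shared-cache URI; a plain `:memory:` open gives each connection its own private, empty database. A minimal `rusqlite` sketch of the idea (not the relay's exact code):

```rust
use rusqlite::{Connection, OpenFlags};

fn open_shared_memory_db() -> rusqlite::Result<Connection> {
    // Every connection opened with this URI sees the same in-memory DB.
    Connection::open_with_flags(
        "file::memory:?cache=shared",
        OpenFlags::SQLITE_OPEN_READ_WRITE
            | OpenFlags::SQLITE_OPEN_CREATE
            | OpenFlags::SQLITE_OPEN_URI,
    )
}

fn main() -> rusqlite::Result<()> {
    let _conn = open_shared_memory_db()?;
    Ok(())
}
```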
Greg Heartsfield
df411c24fb fix: block other writers during checkpoint to eliminate DB lock errors 2023-02-20 16:50:44 -06:00
Greg Heartsfield
39f9984c4f build: bump version to 0.8.7 2023-02-17 21:05:36 -06:00
Greg Heartsfield
9d55731073 fix: Postgres SQL generation for expiring events 2023-02-17 21:04:30 -06:00
Greg Heartsfield
5638f70d66 fix: set SQL tracing back to appropriate level 2023-02-17 20:50:19 -06:00
Greg Heartsfield
98a08d054a improvement: advertise support for NIP-42 in relay info 2023-02-17 14:02:49 -06:00
Greg Heartsfield
0ef7d618a8 build: bump version to 0.8.6 2023-02-17 13:59:07 -06:00
Greg Heartsfield
bf06bea808 feat(NIP-40): postgres support for event expiration 2023-02-17 13:25:56 -06:00
Greg Heartsfield
e5ca8c2a86 improvement: run expired event cleanup every 10 minutes 2023-02-17 11:22:00 -06:00
Greg Heartsfield
8ea63f0b27 feat(NIP-40): sqlite support for event expiration 2023-02-17 11:15:06 -06:00
Greg Heartsfield
3229e4192f feat: publish favicon.ico 2023-02-16 18:03:28 -06:00
0xtr
7fd9b55e70 fix: typo in sqlite_migration.rs 2023-02-15 18:52:49 -06:00
rorp
5cecfba319 feat(NIP-42): pubkey authentication
Configurable in `config.toml`.  Limited functionality, but this does
send metadata to gRPC for event authorization.

fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/66
2023-02-15 18:51:40 -06:00
Greg Heartsfield
d0f57aea21 improvement(NIP-40): functions for checking event expiration 2023-02-15 18:47:27 -06:00
Yuval Adam
40abd6858e docs: cleanup location of documentation 2023-02-15 18:43:22 -06:00
Greg Heartsfield
136e41d234 fix: retry event writes if DB is busy 2023-02-15 18:38:34 -06:00
Yuval Adam
35a1973a46 fix: allow older versions of protobuf-compiler to work
Add --experimental_allow_proto3_optional protoc arg in build configs

fixes https://github.com/scsibug/nostr-rs-relay/issues/77
2023-02-14 16:59:41 -06:00
Kieran
1daa25600d fix: postgres tag inserts 2023-02-14 06:33:01 -06:00
Greg Heartsfield
692925942a build: bump version to 0.8.5 2023-02-13 17:53:33 -06:00
Greg Heartsfield
84afd4b64e refactor: whitespace 2023-02-13 17:52:00 -06:00
Greg Heartsfield
46160bb1f9 fix: correct name of gRPC configuration in toml 2023-02-13 17:30:26 -06:00
Greg Heartsfield
2fc9168a38 fix: SQL error with parameterized replaceable events 2023-02-13 17:10:42 -06:00
Greg Heartsfield
01d0d44868 build: bump version to 0.8.4 2023-02-13 09:34:30 -06:00
Greg Heartsfield
93f6337fda fix: upgrade docker image to include OpenSSL 3 2023-02-13 09:33:14 -06:00
49 changed files with 5513 additions and 1895 deletions


@ -9,7 +9,7 @@ jobs:
test_nostr-rs-relay:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Update local toolchain
run: |

.gitignore

@ -1,2 +1,4 @@
/target
**/target/
nostr.db
nostr.db-*
justfile

Cargo.lock (generated)

File diff suppressed because it is too large


@ -1,6 +1,6 @@
[package]
name = "nostr-rs-relay"
version = "0.8.3"
version = "0.8.13"
edition = "2021"
authors = ["Greg Heartsfield <scsibug@imap.cc>"]
description = "A relay implementation for the Nostr protocol"
@ -13,8 +13,9 @@ categories = ["network-programming", "web-programming"]
[dependencies]
clap = { version = "4.0.32", features = ["env", "default", "derive"]}
tracing = "0.1.36"
tracing-subscriber = "0.2.0"
tracing = "0.1.37"
tracing-appender = "0.2.2"
tracing-subscriber = "0.3.16"
tokio = { version = "1", features = ["full", "tracing", "signal"] }
prost = "0.11"
tonic = "0.8.3"
@ -38,7 +39,7 @@ lazy_static = "1.4"
governor = "0.4"
nonzero_ext = "0.3"
hyper = { version="0.14", features=["client", "server","http1","http2","tcp"] }
hyper-tls = "0.5"
hyper-rustls = { version = "0.24" }
http = { version = "0.2" }
parse_duration = "2"
rand = "0.8"
@ -51,6 +52,12 @@ chrono = "0.4.23"
prometheus = "0.13.3"
indicatif = "0.17.3"
bech32 = "0.9.1"
url = "2.3.1"
qrcode = { version = "0.12.0", default-features = false, features = ["svg"] }
nostr = { version = "0.18.0", default-features = false, features = ["base", "nip04", "nip19"] }
[target.'cfg(not(target_env = "msvc"))'.dependencies]
tikv-jemallocator = "0.5"
log = "0.4"
[dev-dependencies]
anyhow = "1"


@ -21,7 +21,7 @@ COPY ./build.rs ./build.rs
RUN rm ./target/release/deps/nostr*relay*
RUN cargo auditable build --release --locked
FROM docker.io/library/debian:bullseye-slim
FROM docker.io/library/debian:bookworm-slim
ARG APP=/usr/src/app
ARG APP_DATA=/usr/src/app/db


@ -35,6 +35,8 @@ mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay).
- [ ] NIP-26: [Event Delegation](https://github.com/nostr-protocol/nips/blob/master/26.md) (_implemented, but currently disabled_)
- [x] NIP-28: [Public Chat](https://github.com/nostr-protocol/nips/blob/master/28.md)
- [x] NIP-33: [Parameterized Replaceable Events](https://github.com/nostr-protocol/nips/blob/master/33.md)
- [x] NIP-40: [Expiration Timestamp](https://github.com/nostr-protocol/nips/blob/master/40.md)
- [x] NIP-42: [Authentication of clients to relays](https://github.com/nostr-protocol/nips/blob/master/42.md)
## Quick Start
@ -47,7 +49,7 @@ The examples below start a rootless podman container, mapping a local
data directory and config file.
```console
$ podman build -t nostr-rs-relay .
$ podman build --pull -t nostr-rs-relay .
$ mkdir data
@ -91,6 +93,11 @@ https://hub.docker.com/r/scsibug/nostr-rs-relay
Building `nostr-rs-relay` requires an installation of Cargo & Rust: https://www.rust-lang.org/tools/install
The following OS packages will be helpful; on Debian/Ubuntu:
```console
$ sudo apt-get install build-essential cmake protobuf-compiler pkg-config libssl-dev
```
Clone this repository, and then build a release version of the relay:
```console
@ -139,7 +146,7 @@ settings.
For examples of putting the relay behind a reverse proxy (for TLS
termination, load balancing, and other features), see [Reverse
Proxy](reverse-proxy.md).
Proxy](docs/reverse-proxy.md).
## Dev Channel


@ -1,4 +1,7 @@
fn main() -> Result<(), Box<dyn std::error::Error>> {
tonic_build::compile_protos("proto/nauthz.proto")?;
tonic_build::configure()
.build_server(false)
.protoc_arg("--experimental_allow_proto3_optional")
.compile(&["proto/nauthz.proto"], &["proto"])?;
Ok(())
}


@ -10,12 +10,19 @@ name = "nostr-rs-relay"
# Description
description = "A newly created nostr-rs-relay.\n\nCustomize this with your own info."
# Administrative contact pubkey
# Administrative contact pubkey (32-byte hex, not npub)
#pubkey = "0c2d168a4ae8ca58c9f1ab237b5df682599c6c7ab74307ea8b05684b60405d41"
# Administrative contact URI
#contact = "mailto:contact@example.com"
# Favicon location. Relative to the current directory. Assumes an
# ICO format.
#favicon = "favicon.ico"
# URL of Relay's icon.
#relay_icon = "https://example.test/img.png"
[diagnostics]
# Enable tokio tracing (for use with tokio-console)
#tracing = false
@ -38,7 +45,7 @@ description = "A newly created nostr-rs-relay.\n\nCustomize this with your own i
# Database connection pool settings for subscribers:
# Minimum number of SQLite reader connections
#min_conn = 4
#min_conn = 0
# Maximum number of SQLite reader connections. Recommend setting this
# to approx the number of cores.
@ -48,6 +55,16 @@ description = "A newly created nostr-rs-relay.\n\nCustomize this with your own i
# sqlite.
#connection = "postgresql://postgres:nostr@localhost:7500/nostr"
# Optional database connection string for writing. Use this for
# postgres clusters where you want to separate reads and writes to
# different nodes. Ignore for single-database instances.
#connection_write = "postgresql://postgres:nostr@localhost:7500/nostr"
[logging]
# Directory to store log files. Log files roll over daily.
#folder_path = "./log"
#file_prefix = "nostr-relay"
[grpc]
# gRPC interfaces for externalized decisions and other extensions to
# functionality.
@ -56,7 +73,12 @@ description = "A newly created nostr-rs-relay.\n\nCustomize this with your own i
# the URL below. In the event the server is not accessible, events
# will be permitted. The protobuf3 schema used is available in
# `proto/nauthz.proto`.
# event_authorization_server = "http://[::1]:50051"
# event_admission_server = "http://[::1]:50051"
# Whether the event admission server denies writes
# in any case (excluding spam filtering).
# This is reflected in the relay information document.
# restricts_write = true
[network]
# Bind to this network address
@ -128,6 +150,16 @@ reject_future_seconds = 1800
# 70202,
#]
# Event kind allowlist. Events other than these kinds will be discarded.
#event_kind_allowlist = [
# 0, 1, 2, 3, 7, 40, 41, 42, 43, 44, 30023,
#]
# Reject imprecise requests (kind only and author only, etc.)
# This is a temporary measure to improve adoption of the outbox model.
# It's recommended to have this enabled.
limit_scrapers = false
[authorization]
# Pubkey addresses in this array are whitelisted for event publishing.
# Only valid events by these authors will be accepted, if the variable
@ -136,6 +168,10 @@ reject_future_seconds = 1800
# "35d26e4690cbe1a898af61cc3515661eb5fa763b57bd0b42e45099c8b32fd50f",
# "887645fef0ce0c3c1218d2f5d8e6132a19304cdc57cd20281d082f38cfea0072",
#]
# Enable NIP-42 authentication
#nip42_auth = false
# Send DMs (kind 4 and 44) and gift wraps (kind 1059) only to their authenticated recipients
#nip42_dms = false
[verified_users]
# NIP-05 verification of users. Can be "enabled" to require NIP-05
@ -162,3 +198,47 @@ reject_future_seconds = 1800
# How many consecutive failed checks before we give up on verifying
# this author.
#max_consecutive_failures = 20
[pay_to_relay]
# Enable pay to relay
#enabled = false
# The cost to be admitted to relay
#admission_cost = 4200
# The cost in sats per post
#cost_per_event = 0
# Url of lnbits api
#node_url = "<node url>"
# LNBits api secret
#api_secret = "<ln bits api>"
# Nostr direct message on signup
#direct_message=false
# Terms of service
#terms_message = """
#This service (and supporting services) are provided "as is", without warranty of any kind, express or implied.
#
#By using this service, you agree:
#* Not to engage in spam or abuse the relay service
#* Not to disseminate illegal content
#* That requests to delete content cannot be guaranteed
#* To use the service in compliance with all applicable laws
#* To grant necessary rights to your content for unlimited time
#* To be of legal age and have capacity to use this service
#* That the service may be terminated at any time without notice
#* That the content you publish may be removed at any time without notice
#* To have your IP address collected to detect abuse or misuse
#* To cooperate with the relay to combat abuse or misuse
#* You may be exposed to content that you might find triggering or distasteful
#* The relay operator is not liable for content produced by users of the relay
#"""
# Whether or not new sign ups should be allowed
#sign_ups = false
# optional if `direct_message=false`
#secret_key = "<nostr nsec>"


@ -0,0 +1,14 @@
[Unit]
Description=nostr-rs-relay
[Service]
User=REPLACE_WITH_YOUR_USERNAME
WorkingDirectory=/var/lib/nostr-rs-relay
Environment=RUST_LOG=warn,nostr_rs_relay=info
ExecStart=/usr/bin/nostr-rs-relay --config /etc/nostr-rs-relay/config.toml
TimeoutStopSec=10
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target


@ -78,18 +78,24 @@ PRAGMA foreign_keys = ON;
delete from event where event_hash=x'00000000000c1271675dc86e3e1dd1336827bccabb90dc4c9d3b4465efefe00e';
```
### Deleting All Events for Pubkey
### Querying and Deleting All Events for Pubkey
```console
PRAGMA foreign_keys = ON;
select lower(hex(author)) as author, count(*) as c from event group by author order by c asc;
delete from event where author=x'000000000002c7831d9c5a99f183afc2813a6f69a16edda7f6fc0ed8110566e6';
```
### Deleting All Events of a Kind
### Querying and Deleting All Events of a Kind
```console
PRAGMA foreign_keys = ON;
select printf('%7d', kind), count(*) as c from event group by kind order by c;
delete from event where kind=70202;
```
@ -106,7 +112,8 @@ seen" policy.
```console
PRAGMA foreign_keys = ON;
TODO!
DELETE FROM event WHERE first_seen < CAST(strftime('%s', date('now', '-30 day')) AS INT);
```
### Delete Profile Events with No Recent Events


@ -10,7 +10,7 @@ and reduce spam and abuse.
This will likely evolve substantially, the first goal is to get a
basic one-way service that lets an externalized program decide on
event persistance. This does not represent the final state of gRPC
event persistence. This does not represent the final state of gRPC
extensibility in `nostr-rs-relay`.
## Considerations

docs/pay-to-relay.md (new file)

@ -0,0 +1,84 @@
# Pay to Relay Design Document
The relay will use payment as a form of spam prevention. In order to post to the relay, a user must pay a set rate. There is also the option to require a payment for each note posted to the relay. There is no cost to read from the relay.
## Configuration
Currently, [LNBits](https://github.com/lnbits/lnbits) is implemented as the payment processor. LNBits exposes a simple API for creating invoices. To use this API, create a wallet, find "API info" on the right side, and add the invoice/read key to this relay's config file.
The configuration below will need to be added to `config.toml`:
```
[pay_to_relay]
# Enable pay to relay
enabled = true
# The cost to be admitted to relay
admission_cost = 1000
# The cost in sats per post
cost_per_event = 0
# Url of lnbits api
node_url = "https://<IP of node>:5001/api/v1/payments"
# LNBits api secret
api_secret = "<LNbits api key>"
# Terms of service
terms_message = """This service ....
"""
# Whether or not new sign ups should be allowed
sign_ups = true
secret_key = "<nostr secret key to send dms>"
```
The LNBits instance must have a properly signed HTTPS certificate; a self-signed certificate will not work.
## Design Overview
### Concepts
All authors are initially not admitted to write to the relay. There are two ways to gain write access. The first is by attempting to post to the relay: upon receiving an event from an author that is not admitted, the relay will send a direct message including the relay's terms of service and a lightning invoice for the admission cost. Once this invoice is paid, the author can write to the relay. For this method to work, the author must be reading from the relay. An author can also pay and accept the terms of service via a webpage at `https://<relay-url>/join`.
## Design Details
Authors are stored in a dedicated table. This tracks:
* `pubkey`
* `is_admitted` whether or not the admission invoice has been paid and the terms of service accepted.
* `balance` the current balance in sats of the author, used if there is a cost per post
* `tos_accepted_at` the timestamp of when the author accepted the tos
Invoice information is stored in a dedicated table. This tracks:
* `payment_hash` the payment hash of the lightning invoice
* `pubkey` of the author the invoice is issued to
* `invoice` bolt11 invoice
* `amount` in sats
* `status` (Paid/Unpaid/Expired)
* `description`
* `created_at` timestamp of creation
* `confirmed_at` timestamp of payment
### Event Handling
If "pay to relay" is enabled, all incoming events are evaluated to determine whether the author is on the relay's whitelist or if they have paid the admission fee and accepted the terms. If "pay per note" is enabled, there is an additional check to ensure that the author has enough balance, which is then reduced by the cost per note. If the author is on the whitelist, this balance check is not necessary.
### Integration
We have an existing database writer thread, which receives events and
attempts to persist them to disk. Once validated and persisted, these
events are broadcast to all subscribers.
When "pay to relay" is enabled, the writer must check if the author is admitted to post. If the author is not admitted to post the event is forwarded to the payment module. Where an invoice is generated, persisted and broadcast as an direct message to the author.
### Threat Scenarios
Some of these mitigations are fully implemented; others are documented
simply to demonstrate that a mitigation is possible.
### Sign up Spamming
*Threat*: An attacker generates a large number of new pubkeys publishing to the relay, causing a large number of new invoices to be created (one for each new pubkey).
*Mitigation*: Rate limit the number of new sign-ups.
### Admitted Author Spamming
*Threat*: An attacker gains write access by paying the admission fee, and then floods the relay with a large number of spam events.
*Mitigation*: The attacker's admission can be revoked and their admission fee will not be refunded. Enabling "cost per event" and increasing the admission cost can also discourage this type of behavior.


@ -1,8 +1,8 @@
# Reverse Proxy Setup Guide
It is recommended to run `nostr-rs-relay` behind a reverse proxy such
as `haproxy` or `nginx` to provide TLS termination. Simple examples
of `haproxy` and `nginx` configurations are documented here.
as `haproxy`, `nginx` or `traefik` to provide TLS termination. Simple examples
for `haproxy`, `nginx` and `traefik` configurations are documented here.
## Minimal HAProxy Configuration
@ -29,7 +29,7 @@ frontend fe_prod
bind :80
http-request set-header X-Forwarded-Proto https if { ssl_fc }
redirect scheme https code 301 if !{ ssl_fc }
acl host_relay hdr(host) -i relay.example.com
acl host_relay hdr(host) -i -m beg relay.example.com
use_backend relay if host_relay
# HSTS (1 year)
http-response set-header Strict-Transport-Security max-age=31536000
@ -104,8 +104,96 @@ http {
### Nginx Notes
The above configuration was tested on `nginx` `1.18.0` was tested on `Ubuntu 20.04`.
The above configuration was tested on `nginx` `1.18.0` on `Ubuntu` `20.04` and `22.04`
For help installing `nginx` on `Ubuntu`, see [this guide](https://www.digitalocean.com/community/tutorials/how-to-install-nginx-on-ubuntu-20-04).
For guidance on using `letsencrypt` to obtain a cert on `Ubuntu`, including an `nginx` plugin, see [this post](https://www.digitalocean.com/community/tutorials/how-to-secure-nginx-with-let-s-encrypt-on-ubuntu-20-04).
## Example Traefik Configuration
Assumptions:
* `Traefik` version is `2.9` (other versions not tested).
* `Traefik` is used for provisioning of Let's Encrypt certificates.
* `Traefik` is running in `Docker`, using `docker compose` and labels for the static configuration. An equivalent setup using a Traefik config file is possible too (but not covered here).
* Strict Transport Security is enabled.
* Hostname for the relay is `relay.example.com`, email address for ACME certificates provider is `name@example.com`.
* ipv6 is enabled, a viable private ipv6 subnet is specified in the example below.
* Relay is running on port `8080`.
```
version: '3'
networks:
nostr:
enable_ipv6: true
ipam:
config:
- subnet: fd00:db8:a::/64
gateway: fd00:db8:a::1
services:
traefik:
image: traefik:v2.9
networks:
nostr:
command:
- "--log.level=ERROR"
# letsencrypt configuration
- "--certificatesResolvers.http.acme.email==name@example.com"
- "--certificatesResolvers.http.acme.storage=/certs/acme.json"
- "--certificatesResolvers.http.acme.httpChallenge.entryPoint=http"
# define entrypoints
- "--entryPoints.http.address=:80"
- "--entryPoints.http.http.redirections.entryPoint.to=https"
- "--entryPoints.http.http.redirections.entryPoint.scheme=https"
- "--entryPoints.https.address=:443"
- "--entryPoints.https.forwardedHeaders.insecure=true"
- "--entryPoints.https.proxyProtocol.insecure=true"
# docker provider (get configuration from container labels)
- "--providers.docker.endpoint=unix:///var/run/docker.sock"
- "--providers.docker.exposedByDefault=false"
- "--providers.file.directory=/config"
- "--providers.file.watch=true"
ports:
- "80:80"
- "443:443"
volumes:
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "$(pwd)/traefik/certs:/certs"
- "$(pwd)/traefik/config:/config"
logging:
driver: "local"
restart: always
# example nostr config. only labels: section is relevant for Traefik config
nostr:
image: nostr-rs-relay:latest
container_name: nostr-relay
networks:
nostr:
restart: always
user: 100:100
volumes:
- '$(pwd)/nostr/data:/usr/src/app/db:Z'
- '$(pwd)/nostr/config/config.toml:/usr/src/app/config.toml:ro,Z'
labels:
- "traefik.enable=true"
- "traefik.http.routers.nostr.entrypoints=https"
- "traefik.http.routers.nostr.rule=Host(`relay.example.com`)"
- "traefik.http.routers.nostr.tls.certresolver=http"
- "traefik.http.routers.nostr.service=nostr"
- "traefik.http.services.nostr.loadbalancer.server.port=8080"
- "traefik.http.services.nostr.loadbalancer.passHostHeader=true"
- "traefik.http.middlewares.nostr.headers.sslredirect=true"
- "traefik.http.middlewares.nostr.headers.stsincludesubdomains=true"
- "traefik.http.middlewares.nostr.headers.stspreload=true"
- "traefik.http.middlewares.nostr.headers.stsseconds=63072000"
- "traefik.http.routers.nostr.middlewares=nostr"
```
### Traefik Notes
Traefik will take care of the provisioning and renewal of certificates. In case of an ipv4-only relay, simply delete the `enable_ipv6:` and `ipam:` entries in the `networks:` section of the docker-compose file.


@ -0,0 +1,40 @@
# Run as a linux system process
Docker makes it easy to spin environments up and down, but it's also possible to run `nostr-rs-relay` as a systemd Linux process.
This guide assumes you're on a Linux machine and that Rust is already installed.
## Instructions
### Build nostr-rs-relay from source
Start by building the application from source. Here is how to do that:
1. `git clone https://github.com/scsibug/nostr-rs-relay.git`
2. `cd nostr-rs-relay`
3. `cargo build --release`
### Place the files where they belong
Next, place the nostr-rs-relay binary and the config.toml file where they belong. While still at the root of the nostr-rs-relay folder you cloned in the previous step, run the following commands:
1. `sudo cp target/release/nostr-rs-relay /usr/local/bin/`
2. `sudo mkdir /etc/nostr-rs-relay`
3. `sudo cp config.toml /etc/nostr-rs-relay`
### Create the Systemd service file
We need to create a new systemd service file. These files live in the `/etc/systemd/system/` directory, alongside the unit files for the other services on your machine.
1. `sudo vim /etc/systemd/system/nostr-rs-relay.service`
2. Paste in the contents of [this service file](../contrib/nostr-rs-relay.service). Remember to replace the `User` value with your own username; a sketch of the unit's overall shape follows below.
3. Save the file and exit your text editor
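If you just want to see the shape of such a unit, here is a minimal sketch (the file in `contrib/` is authoritative; the `User`, paths, and `--config` flag shown here follow the install steps above):
```
[Unit]
Description=nostr-rs-relay
After=network.target

[Service]
Type=simple
User=nostr
ExecStart=/usr/local/bin/nostr-rs-relay --config /etc/nostr-rs-relay/config.toml
Restart=on-failure

[Install]
WantedBy=multi-user.target
```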
### Run the service
To get the service running, we need to reload the systemd daemon and enable the service.
1. `sudo systemctl daemon-reload`
2. `sudo systemctl start nostr-rs-relay.service`
3. `sudo systemctl enable nostr-rs-relay.service`
4. `sudo systemctl status nostr-rs-relay.service`
### Tips
#### Logs
The application writes logs to the journal. To follow them, execute `sudo journalctl -f -u nostr-rs-relay`


@ -179,7 +179,7 @@ attempts to persist them to disk. Once validated and persisted, these
events are broadcast to all subscribers.
When verification is enabled, the writer must check to ensure a valid,
unexpired verification record exists for the auther. All metadata
unexpired verification record exists for the author. All metadata
events (regardless of verification status) are forwarded to a verifier
module. If the verifier determines a new verification record is
needed, it is also responsible for persisting and broadcasting the

examples/nauthz/build.rs

@ -0,0 +1,7 @@
fn main() -> Result<(), Box<dyn std::error::Error>> {
tonic_build::configure()
.build_server(true)
.protoc_arg("--experimental_allow_proto3_optional")
.compile(&["../../proto/nauthz.proto"], &["../../proto"])?;
Ok(())
}
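Note that `tonic_build` implies a `[build-dependencies]` entry in the example's `Cargo.toml`, and `protoc` typically needs to be installed on the build host. A sketch of that entry (the version shown is illustrative):
```
[build-dependencies]
tonic-build = "0.8"
```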


@ -1,7 +1,7 @@
use tonic::{transport::Server, Request, Response, Status};
use nauthz_grpc::authorization_server::{Authorization, AuthorizationServer};
use nauthz_grpc::{EventReply, EventRequest, Decision};
use nauthz_grpc::{Decision, EventReply, EventRequest};
pub mod nauthz_grpc {
tonic::include_proto!("nauthz");
@ -14,7 +14,6 @@ pub struct EventAuthz {
#[tonic::async_trait]
impl Authorization for EventAuthz {
async fn event_admit(
&self,
request: Request<EventRequest>,
@ -22,18 +21,18 @@ impl Authorization for EventAuthz {
let reply;
let req = request.into_inner();
let event = req.event.unwrap();
let content_prefix:String = event.content.chars().take(40).collect();
let content_prefix: String = event.content.chars().take(40).collect();
println!("recvd event, [kind={}, origin={:?}, nip05_domain={:?}, tag_count={}, content_sample={:?}]",
event.kind, req.origin, req.nip05.map(|x| x.domain), event.tags.len(), content_prefix);
// Permit any event with a whitelisted kind
if self.allowed_kinds.contains(&event.kind) {
println!("This looks fine! (kind={})",event.kind);
println!("This looks fine! (kind={})", event.kind);
reply = nauthz_grpc::EventReply {
decision: Decision::Permit as i32,
message: None
message: None,
};
} else {
println!("Blocked! (kind={})",event.kind);
println!("Blocked! (kind={})", event.kind);
reply = nauthz_grpc::EventReply {
decision: Decision::Deny as i32,
message: Some(format!("kind {} not permitted", event.kind)),
@ -49,7 +48,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
// A simple authorization engine that allows kinds 0-3
let checker = EventAuthz {
allowed_kinds: vec![0,1,2,3],
allowed_kinds: vec![0, 1, 2, 3],
};
println!("EventAuthz Server listening on {}", addr);
// Start serving


@ -1,4 +0,0 @@
fn main() -> Result<(), Box<dyn std::error::Error>> {
tonic_build::compile_protos("../proto/nauthz.proto")?;
Ok(())
}


@ -1,16 +1,16 @@
use nostr_rs_relay::config;
use nostr_rs_relay::error::{Error, Result};
use nostr_rs_relay::event::{single_char_tagname, Event};
use nostr_rs_relay::repo::sqlite::{build_pool, PooledConnection};
use nostr_rs_relay::repo::sqlite_migration::{curr_db_version, DB_VERSION};
use nostr_rs_relay::utils::is_lower_hex;
use rusqlite::params;
use rusqlite::{OpenFlags, Transaction};
use std::io;
use std::path::Path;
use nostr_rs_relay::utils::is_lower_hex;
use tracing::info;
use nostr_rs_relay::config;
use nostr_rs_relay::event::{Event,single_char_tagname};
use nostr_rs_relay::error::{Error, Result};
use nostr_rs_relay::repo::sqlite::{PooledConnection, build_pool};
use nostr_rs_relay::repo::sqlite_migration::{curr_db_version, DB_VERSION};
use rusqlite::{OpenFlags, Transaction};
use std::sync::mpsc;
use std::thread;
use rusqlite::params;
use tracing::info;
/// Bulk load JSONL data from STDIN to the database specified in config.toml (or ./nostr.db as a default).
/// The database must already exist; this will not create a new one.
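A usage sketch, assuming this file builds to a `bulkloader` bin target:
```
# Stream one JSON event per line into the existing database
./target/release/bulkloader < events.jsonl
```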
@ -20,95 +20,101 @@ pub fn main() -> Result<()> {
let _trace_sub = tracing_subscriber::fmt::try_init();
println!("Nostr-rs-relay Bulk Loader");
// check for a database file, or create one.
let settings = config::Settings::new(&None);
let settings = config::Settings::new(&None)?;
if !Path::new(&settings.database.data_directory).is_dir() {
info!("Database directory does not exist");
return Err(Error::DatabaseDirError);
}
// Get a database pool
let pool = build_pool("bulk-loader", &settings, OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE, 1,4,false);
let pool = build_pool(
"bulk-loader",
&settings,
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
1,
4,
false,
);
{
// check for database schema version
let mut conn: PooledConnection = pool.get()?;
let version = curr_db_version(&mut conn)?;
info!("current version is: {:?}", version);
// ensure the schema version is current.
if version != DB_VERSION {
info!("version is not current, exiting");
panic!("cannot write to schema other than v{DB_VERSION}");
}
// check for database schema version
let mut conn: PooledConnection = pool.get()?;
let version = curr_db_version(&mut conn)?;
info!("current version is: {:?}", version);
// ensure the schema version is current.
if version != DB_VERSION {
info!("version is not current, exiting");
panic!("cannot write to schema other than v{DB_VERSION}");
}
}
// this channel will contain parsed events ready to be inserted
let (event_tx, event_rx) = mpsc::sync_channel(100_000);
// Thread for reading events
let _stdin_reader_handler = thread::spawn(move || {
let stdin = io::stdin();
for readline in stdin.lines() {
if let Ok(line) = readline {
// try to parse a nostr event
let eres: Result<Event, serde_json::Error> = serde_json::from_str(&line);
if let Ok(mut e) = eres {
if let Ok(()) = e.validate() {
e.build_index();
//debug!("Event: {:?}", e);
event_tx.send(Some(e)).ok();
} else {
info!("could not validate event");
}
} else {
info!("error reading event: {:?}", eres);
}
} else {
// error reading
info!("error reading: {:?}", readline);
}
}
info!("finished parsing events");
event_tx.send(None).ok();
let ok: Result<()> = Ok(());
let stdin = io::stdin();
for readline in stdin.lines() {
if let Ok(line) = readline {
// try to parse a nostr event
let eres: Result<Event, serde_json::Error> = serde_json::from_str(&line);
if let Ok(mut e) = eres {
if let Ok(()) = e.validate() {
e.build_index();
//debug!("Event: {:?}", e);
event_tx.send(Some(e)).ok();
} else {
info!("could not validate event");
}
} else {
info!("error reading event: {:?}", eres);
}
} else {
// error reading
info!("error reading: {:?}", readline);
}
}
info!("finished parsing events");
event_tx.send(None).ok();
let ok: Result<()> = Ok(());
ok
});
let mut conn: PooledConnection = pool.get()?;
let mut events_read = 0;
let event_batch_size =50_000;
let event_batch_size = 50_000;
let mut new_events = 0;
let mut has_more_events = true;
while has_more_events {
// begin a transaction
let tx = conn.transaction()?;
// read in batch_size events and commit
for _ in 0..event_batch_size {
match event_rx.recv() {
Ok(Some(e)) => {
events_read += 1;
// ignore ephemeral events
if !(e.kind >= 20000 && e.kind < 30000) {
match write_event(&tx, e) {
Ok(c) => {
new_events += c;
},
Err(e) => {
info!("error inserting event: {:?}", e);
}
}
}
},
Ok(None) => {
// signal that the sender will never produce more
// events
has_more_events=false;
break;
},
Err(_) => {
info!("sender is closed");
// sender is done
}
}
}
info!("committed {} events...", new_events);
tx.commit()?;
conn.execute_batch("pragma wal_checkpoint(truncate)")?;
// begin a transaction
let tx = conn.transaction()?;
// read in batch_size events and commit
for _ in 0..event_batch_size {
match event_rx.recv() {
Ok(Some(e)) => {
events_read += 1;
// ignore ephemeral events
if !(e.kind >= 20000 && e.kind < 30000) {
match write_event(&tx, e) {
Ok(c) => {
new_events += c;
}
Err(e) => {
info!("error inserting event: {:?}", e);
}
}
}
}
Ok(None) => {
// signal that the sender will never produce more
// events
has_more_events = false;
break;
}
Err(_) => {
info!("sender is closed");
// sender is done
}
}
}
info!("committed {} events...", new_events);
tx.commit()?;
conn.execute_batch("pragma wal_checkpoint(truncate)")?;
}
info!("processed {} events", events_read);
info!("stored {} new events", new_events);
@ -131,39 +137,39 @@ fn write_event(tx: &Transaction, e: Event) -> Result<usize> {
params![id_blob, e.created_at, e.kind, pubkey_blob, delegator_blob, event_str]
)?;
if ins_count == 0 {
return Ok(0);
return Ok(0);
}
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
let event_id = tx.last_insert_rowid();
// look at each event, and each tag, creating new tag entries if appropriate.
for t in e.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.get(0).unwrap();
let tagname = t.first().unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
continue;
}
// safe because len was > 1
let tagval = t.get(1).unwrap();
// insert as BLOB if we can restore it losslessly.
// this means it needs to be even length and lowercase.
if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
tx.execute(
tx.execute(
"INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
params![event_id, tagname, hex::decode(tagval).ok()],
)?;
)?;
} else {
// otherwise, insert as text
tx.execute(
// otherwise, insert as text
tx.execute(
"INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
params![event_id, tagname, &tagval],
)?;
}
}
if e.is_replaceable() {
//let query = "SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1;";
//let count: usize = tx.query_row(query, params![e.kind, pubkey_blob], |row| row.get(0))?;
//info!("found {} rows that /would/ be preserved", count);
match tx.execute(
//let query = "SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1;";
//let count: usize = tx.query_row(query, params![e.kind, pubkey_blob], |row| row.get(0))?;
//info!("found {} rows that /would/ be preserved", count);
match tx.execute(
"DELETE FROM event WHERE kind=? and author=? and id NOT IN (SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1);",
params![e.kind, pubkey_blob, e.kind, pubkey_blob],
) {


@ -7,14 +7,14 @@ pub struct CLIArgs {
short,
long,
help = "Use the <directory> as the location of the database",
required = false,
required = false
)]
pub db: Option<String>,
#[arg(
short,
long,
help = "Use the <file name> as the location of the config file",
required = false,
required = false
)]
pub config: Option<String>,
}


@ -1,8 +1,8 @@
//! Configuration file and settings management
use crate::payment::Processor;
use config::{Config, ConfigError, File};
use serde::{Deserialize, Serialize};
use std::time::Duration;
use tracing::warn;
#[derive(Debug, Serialize, Deserialize, Clone)]
#[allow(unused)]
@ -12,6 +12,8 @@ pub struct Info {
pub description: Option<String>,
pub pubkey: Option<String>,
pub contact: Option<String>,
pub favicon: Option<String>,
pub relay_icon: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -23,12 +25,14 @@ pub struct Database {
pub min_conn: u32,
pub max_conn: u32,
pub connection: String,
pub connection_write: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Grpc {
pub event_admission_server: Option<String>,
pub restricts_write: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -68,13 +72,32 @@ pub struct Limits {
pub max_ws_frame_bytes: Option<usize>,
pub broadcast_buffer: usize, // events to buffer for subscribers (prevents slow readers from consuming memory)
pub event_persist_buffer: usize, // events to buffer for database commits (block senders if database writes are too slow)
pub event_kind_blacklist: Option<Vec<u64>>
pub event_kind_blacklist: Option<Vec<u64>>,
pub event_kind_allowlist: Option<Vec<u64>>,
pub limit_scrapers: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Authorization {
pub pubkey_whitelist: Option<Vec<String>>, // If present, only allow these pubkeys to publish events
pub nip42_auth: bool, // if true enables NIP-42 authentication
pub nip42_dms: bool, // if true send DMs only to their authenticated recipients
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct PayToRelay {
pub enabled: bool,
pub admission_cost: u64, // Cost to have pubkey whitelisted
pub cost_per_event: u64, // Cost author to pay per event
pub node_url: String,
pub api_secret: String,
pub terms_message: String,
pub sign_ups: bool, // allow new users to sign up to relay
pub direct_message: bool, // Send direct message to user with invoice and terms
pub secret_key: Option<String>,
pub processor: Processor,
}
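In `config.toml`, this struct corresponds to a section of roughly the following shape (a sketch built from the field names above and the defaults added below; the `node_url` value and the `processor` string representation are assumptions):
```
[pay_to_relay]
enabled = true
admission_cost = 4200       # sats; multiplied by 1000 to msats for NIP-11 fees
cost_per_event = 0          # sats per published event
node_url = "https://lnbits.example.com"
api_secret = "<lnbits api key>"
terms_message = "By joining you accept the relay terms."
sign_ups = true
direct_message = false
processor = "LNBits"
```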
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -145,6 +168,13 @@ impl VerifiedUsers {
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Logging {
pub folder_path: Option<String>,
pub file_prefix: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Settings {
@ -155,38 +185,48 @@ pub struct Settings {
pub network: Network,
pub limits: Limits,
pub authorization: Authorization,
pub pay_to_relay: PayToRelay,
pub verified_users: VerifiedUsers,
pub retention: Retention,
pub options: Options,
pub logging: Logging,
}
impl Settings {
#[must_use]
pub fn new(config_file_name: &Option<String>) -> Self {
pub fn new(config_file_name: &Option<String>) -> Result<Self, ConfigError> {
let default_settings = Self::default();
// attempt to construct settings with file
let from_file = Self::new_from_default(&default_settings, config_file_name);
match from_file {
Ok(f) => f,
Err(e) => {
warn!("Error reading config file ({:?})", e);
default_settings
// pass up the parse error if the config file was specified,
// otherwise use the default config (with a warning).
if config_file_name.is_some() {
Err(e)
} else {
eprintln!("Error reading config file ({:?})", e);
eprintln!("WARNING: Default configuration settings will be used");
Ok(default_settings)
}
}
ok => ok,
}
}
fn new_from_default(default: &Settings, config_file_name: &Option<String>) -> Result<Self, ConfigError> {
fn new_from_default(
default: &Settings,
config_file_name: &Option<String>,
) -> Result<Self, ConfigError> {
let default_config_file_name = "config.toml".to_string();
let config: &String = match config_file_name {
Some(value) => value,
None => &default_config_file_name
None => &default_config_file_name,
};
let builder = Config::builder();
let config: Config = builder
// use defaults
// use defaults
.add_source(Config::try_from(default)?)
// override with file contents
// override with file contents
.add_source(File::with_name(config))
.build()?;
let mut settings: Settings = config.try_deserialize()?;
@ -204,6 +244,23 @@ impl Settings {
);
// initialize durations for verified users
settings.verified_users.init();
// Validate pay to relay settings
if settings.pay_to_relay.enabled {
assert_ne!(settings.pay_to_relay.api_secret, "");
// Should check that url is valid
assert_ne!(settings.pay_to_relay.node_url, "");
assert_ne!(settings.pay_to_relay.terms_message, "");
if settings.pay_to_relay.direct_message {
assert_ne!(
settings.pay_to_relay.secret_key,
Some("<nostr nsec>".to_string())
);
assert!(settings.pay_to_relay.secret_key.is_some());
}
}
Ok(settings)
}
}
@ -217,6 +274,8 @@ impl Default for Settings {
description: None,
pubkey: None,
contact: None,
favicon: None,
relay_icon: None,
},
diagnostics: Diagnostics { tracing: false },
database: Database {
@ -225,10 +284,12 @@ impl Default for Settings {
in_memory: false,
min_conn: 4,
max_conn: 8,
connection: "".to_owned(),
connection: "".to_owned(),
connection_write: None,
},
grpc: Grpc {
event_admission_server: None,
restricts_write: false,
},
network: Network {
port: 8080,
@ -247,9 +308,25 @@ impl Default for Settings {
broadcast_buffer: 16384,
event_persist_buffer: 4096,
event_kind_blacklist: None,
event_kind_allowlist: None,
limit_scrapers: false
},
authorization: Authorization {
pubkey_whitelist: None, // Allow any address to publish
nip42_auth: false, // Disable NIP-42 authentication
nip42_dms: false, // Send DMs to everybody
},
pay_to_relay: PayToRelay {
enabled: false,
admission_cost: 4200,
cost_per_event: 0,
terms_message: "".to_string(),
node_url: "".to_string(),
api_secret: "".to_string(),
sign_ups: false,
direct_message: false,
secret_key: None,
processor: Processor::LNBits,
},
verified_users: VerifiedUsers {
mode: VerifiedUsersMode::Disabled,
@ -270,6 +347,10 @@ impl Default for Settings {
options: Options {
reject_future_seconds: None, // Reject events in the future if defined
},
logging: Logging {
folder_path: None,
file_prefix: None,
},
}
}
}


@ -1,16 +1,30 @@
//! Client connection state
use crate::close::Close;
use crate::error::Error;
use crate::error::Result;
use crate::subscription::Subscription;
use std::collections::HashMap;
use tracing::{debug, trace};
use uuid::Uuid;
use crate::close::Close;
use crate::conn::Nip42AuthState::{AuthPubkey, Challenge, NoAuth};
use crate::error::Error;
use crate::error::Result;
use crate::event::Event;
use crate::subscription::Subscription;
use crate::utils::{host_str, unix_time};
/// A subscription identifier has a maximum length
const MAX_SUBSCRIPTION_ID_LEN: usize = 256;
/// NIP-42 authentication state
pub enum Nip42AuthState {
/// The client is not authenticated yet
NoAuth,
/// The AUTH challenge sent
Challenge(String),
/// The client is authenticated
AuthPubkey(String),
}
/// State for a client connection
pub struct ClientConn {
/// Client IP (either from socket, or configured proxy header)
@ -21,6 +35,8 @@ pub struct ClientConn {
subscriptions: HashMap<String, Subscription>,
/// Per-connection maximum concurrent subscriptions
max_subs: usize,
/// NIP-42 AUTH
auth: Nip42AuthState,
}
impl Default for ClientConn {
@ -39,15 +55,18 @@ impl ClientConn {
client_id,
subscriptions: HashMap::new(),
max_subs: 32,
auth: NoAuth,
}
}
#[must_use] pub fn subscriptions(&self) -> &HashMap<String, Subscription> {
#[must_use]
pub fn subscriptions(&self) -> &HashMap<String, Subscription> {
&self.subscriptions
}
/// Check if the given subscription already exists
#[must_use] pub fn has_subscription(&self, sub: &Subscription) -> bool {
#[must_use]
pub fn has_subscription(&self, sub: &Subscription) -> bool {
self.subscriptions.values().any(|x| x == sub)
}
@ -63,6 +82,22 @@ impl ClientConn {
&self.client_ip_addr
}
#[must_use]
pub fn auth_pubkey(&self) -> Option<&String> {
match &self.auth {
AuthPubkey(pubkey) => Some(pubkey),
_ => None,
}
}
#[must_use]
pub fn auth_challenge(&self) -> Option<&String> {
match &self.auth {
Challenge(pubkey) => Some(pubkey),
_ => None,
}
}
/// Add a new subscription for this connection.
/// # Errors
///
@ -116,4 +151,79 @@ impl ClientConn {
self.get_client_prefix(),
);
}
pub fn generate_auth_challenge(&mut self) {
self.auth = Challenge(Uuid::new_v4().to_string());
}
pub fn authenticate(&mut self, event: &Event, relay_url: &str) -> Result<()> {
match &self.auth {
Challenge(_) => (),
AuthPubkey(_) => {
// already authenticated
return Ok(());
}
NoAuth => {
// unexpected AUTH request
return Err(Error::AuthFailure);
}
}
match event.validate() {
Ok(_) => {
if event.kind != 22242 {
return Err(Error::AuthFailure);
}
let curr_time = unix_time();
let past_cutoff = curr_time - 600; // 10 minutes
let future_cutoff = curr_time + 600; // 10 minutes
if event.created_at < past_cutoff || event.created_at > future_cutoff {
return Err(Error::AuthFailure);
}
let mut challenge: Option<&str> = None;
let mut relay: Option<&str> = None;
for tag in &event.tags {
if tag.len() == 2 && tag.first() == Some(&"challenge".into()) {
challenge = tag.get(1).map(|x| x.as_str());
}
if tag.len() == 2 && tag.first() == Some(&"relay".into()) {
relay = tag.get(1).map(|x| x.as_str());
}
}
match (challenge, &self.auth) {
(Some(received_challenge), Challenge(sent_challenge)) => {
if received_challenge != sent_challenge {
return Err(Error::AuthFailure);
}
}
(_, _) => {
return Err(Error::AuthFailure);
}
}
match (relay.and_then(host_str), host_str(relay_url)) {
(Some(received_relay), Some(our_relay)) => {
if received_relay != our_relay {
return Err(Error::AuthFailure);
}
}
(_, _) => {
return Err(Error::AuthFailure);
}
}
self.auth = AuthPubkey(event.pubkey.clone());
trace!(
"authenticated pubkey {} (cid: {})",
event.pubkey.chars().take(8).collect::<String>(),
self.get_client_prefix()
);
Ok(())
}
Err(_) => Err(Error::AuthFailure),
}
}
}
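To summarize, `authenticate` accepts a kind `22242` event created within ten minutes of the current time, whose `challenge` tag matches the challenge we sent and whose `relay` tag matches our own host. A hypothetical client-side sketch of the expected tag layout (values are placeholders):
```
fn main() {
    // Tag layout for a NIP-42 AUTH event (kind 22242).
    // The relay checks that the challenge matches the one it issued,
    // and that the relay URL's host matches its own.
    let tags: Vec<Vec<String>> = vec![
        vec!["relay".to_string(), "wss://relay.example.com/".to_string()],
        vec!["challenge".to_string(), "<uuid-from-relay>".to_string()],
    ];
    println!("{tags:?}");
}
```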

src/db.rs

@ -2,22 +2,25 @@
use crate::config::Settings;
use crate::error::{Error, Result};
use crate::event::Event;
use crate::notice::Notice;
use crate::server::NostrMetrics;
use crate::nauthz;
use crate::notice::Notice;
use crate::payment::PaymentMessage;
use crate::repo::postgres::{PostgresPool, PostgresRepo};
use crate::repo::sqlite::SqliteRepo;
use crate::repo::NostrRepo;
use crate::server::NostrMetrics;
use governor::clock::Clock;
use governor::{Quota, RateLimiter};
use log::LevelFilter;
use nostr::key::FromPkStr;
use nostr::key::Keys;
use r2d2;
use std::sync::Arc;
use std::thread;
use sqlx::pool::PoolOptions;
use sqlx::postgres::PgConnectOptions;
use sqlx::ConnectOptions;
use crate::repo::sqlite::SqliteRepo;
use crate::repo::postgres::{PostgresRepo,PostgresPool};
use crate::repo::NostrRepo;
use std::time::{Instant, Duration};
use tracing::log::LevelFilter;
use std::sync::Arc;
use std::thread;
use std::time::{Duration, Instant};
use tracing::{debug, info, trace, warn};
pub type SqlitePool = r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>;
@ -30,6 +33,7 @@ pub struct SubmittedEvent {
pub source_ip: String,
pub origin: Option<String>,
pub user_agent: Option<String>,
pub auth_pubkey: Option<Vec<u8>>,
}
/// Database file
@ -41,8 +45,8 @@ pub const DB_FILE: &str = "nostr.db";
/// Will panic if the pool could not be created.
pub async fn build_repo(settings: &Settings, metrics: NostrMetrics) -> Arc<dyn NostrRepo> {
match settings.database.engine.as_str() {
"sqlite" => {Arc::new(build_sqlite_pool(settings, metrics).await)},
"postgres" => {Arc::new(build_postgres_pool(settings, metrics).await)},
"sqlite" => Arc::new(build_sqlite_pool(settings, metrics).await),
"postgres" => Arc::new(build_postgres_pool(settings, metrics).await),
_ => panic!("Unknown database engine"),
}
}
@ -66,10 +70,31 @@ async fn build_postgres_pool(settings: &Settings, metrics: NostrMetrics) -> Post
.connect_with(options)
.await
.unwrap();
let repo = PostgresRepo::new(pool, metrics);
let write_pool: PostgresPool = match &settings.database.connection_write {
Some(cfg_write) => {
let mut options_write: PgConnectOptions = cfg_write.as_str().parse().unwrap();
options_write.log_statements(LevelFilter::Debug);
options_write.log_slow_statements(LevelFilter::Warn, Duration::from_secs(60));
PoolOptions::new()
.max_connections(settings.database.max_conn)
.min_connections(settings.database.min_conn)
.idle_timeout(Duration::from_secs(60))
.connect_with(options_write)
.await
.unwrap()
}
None => pool.clone(),
};
let repo = PostgresRepo::new(pool, write_pool, metrics);
// Panic on migration failure
let version = repo.migrate_up().await.unwrap();
info!("Postgres migration completed, at v{}", version);
// startup scheduled tasks
repo.start().await.ok();
repo
}
@ -80,6 +105,7 @@ pub async fn db_writer(
mut event_rx: tokio::sync::mpsc::Receiver<SubmittedEvent>,
bcast_tx: tokio::sync::broadcast::Sender<Event>,
metadata_tx: tokio::sync::broadcast::Sender<Event>,
payment_tx: tokio::sync::broadcast::Sender<PaymentMessage>,
mut shutdown: tokio::sync::broadcast::Receiver<()>,
) -> Result<()> {
// are we performing NIP-05 checking?
@ -87,6 +113,10 @@ pub async fn db_writer(
// are we requiring NIP-05 user verification?
let nip05_enabled = settings.verified_users.is_enabled();
let pay_to_relay_enabled = settings.pay_to_relay.enabled;
let cost_per_event = settings.pay_to_relay.cost_per_event;
debug!("Pay to relay: {}", pay_to_relay_enabled);
//upgrade_db(&mut pool.get()?)?;
// Make a copy of the whitelist
@ -113,8 +143,8 @@ pub async fn db_writer(
};
//let gprc_client = settings.grpc.event_admission_server.map(|s| {
// event_admitter_connect(&s);
// });
// event_admitter_connect(&s);
// });
loop {
if shutdown.try_recv().is_ok() {
@ -133,24 +163,6 @@ pub async fn db_writer(
let subm_event = next_event.unwrap();
let event = subm_event.event;
let notice_tx = subm_event.notice_tx;
// check if this event is authorized.
if let Some(allowed_addrs) = whitelist {
// TODO: incorporate delegated pubkeys
// if the event address is not in allowed_addrs.
if !allowed_addrs.contains(&event.pubkey) {
debug!(
"rejecting event: {}, unauthorized author",
event.get_event_id_prefix()
);
notice_tx
.try_send(Notice::blocked(
event.id,
"pubkey is not allowed to publish to this relay",
))
.ok();
continue;
}
}
// Check that event kind isn't blacklisted
let kinds_blacklist = &settings.limits.event_kind_blacklist.clone();
@ -162,15 +174,113 @@ pub async fn db_writer(
&event.kind
);
notice_tx
.try_send(Notice::blocked(
event.id,
"event kind is blocked by relay"
))
.try_send(Notice::blocked(event.id, "event kind is blocked by relay"))
.ok();
continue;
}
}
// Check that event kind isn't allowlisted
let kinds_allowlist = &settings.limits.event_kind_allowlist.clone();
if let Some(event_kind_allowlist) = kinds_allowlist {
if !event_kind_allowlist.contains(&event.kind) {
debug!(
"rejecting event: {}, allowlist kind: {}",
&event.get_event_id_prefix(),
&event.kind
);
notice_tx
.try_send(Notice::blocked(event.id, "event kind is blocked by relay"))
.ok();
continue;
}
}
// Set to None until the balance is fetched from the db.
// Will stay None if the user is whitelisted and does not have to pay to post.
// When pay to relay is enabled, the whitelist is not a list of who can post;
// it is a list of who can post for free.
let mut user_balance: Option<u64> = None;
if !pay_to_relay_enabled {
// check if this event is authorized.
if let Some(allowed_addrs) = whitelist {
// TODO: incorporate delegated pubkeys
// if the event address is not in allowed_addrs.
if !allowed_addrs.contains(&event.pubkey) {
debug!(
"rejecting event: {}, unauthorized author",
event.get_event_id_prefix()
);
notice_tx
.try_send(Notice::blocked(
event.id,
"pubkey is not allowed to publish to this relay",
))
.ok();
continue;
}
}
} else {
// If the user is on the whitelist there is no need to check whether they are admitted or have a balance to post
if whitelist.is_none()
|| (whitelist.is_some() && !whitelist.as_ref().unwrap().contains(&event.pubkey))
{
let key = Keys::from_pk_str(&event.pubkey).unwrap();
match repo.get_account_balance(&key).await {
Ok((user_admitted, balance)) => {
// Checks to make sure user is admitted
if !user_admitted {
debug!("user: {}, is not admitted", &event.pubkey);
// If the user is in DB but not admitted
// Send message to payment thread to check if outstanding invoice has been paid
payment_tx
.send(PaymentMessage::CheckAccount(event.pubkey))
.ok();
notice_tx
.try_send(Notice::blocked(event.id, "User is not admitted"))
.ok();
continue;
}
// Checks that user has enough balance to post
// TODO: this should send an invoice to user to top up
if balance < cost_per_event {
debug!("user: {}, does not have a balance", &event.pubkey,);
notice_tx
.try_send(Notice::blocked(event.id, "Insufficient balance"))
.ok();
continue;
}
user_balance = Some(balance);
debug!("User balance: {:?}", user_balance);
}
Err(
Error::SqlError(rusqlite::Error::QueryReturnedNoRows)
| Error::SqlxError(sqlx::Error::RowNotFound),
) => {
// User does not exist
info!("Unregistered user");
if settings.pay_to_relay.sign_ups && settings.pay_to_relay.direct_message {
payment_tx
.send(PaymentMessage::NewAccount(event.pubkey))
.ok();
}
let msg = "Pubkey not registered";
notice_tx.try_send(Notice::error(event.id, msg)).ok();
continue;
}
Err(err) => {
warn!("Error checking admission status: {:?}", err);
let msg = "relay experienced an error checking your admission status";
notice_tx.try_send(Notice::error(event.id, msg)).ok();
// Other error
continue;
}
}
}
}
// send any metadata events to the NIP-05 verifier
if nip05_active && event.is_kind_metadata() {
// we are sending this prior to even deciding if we
@ -187,7 +297,6 @@ pub async fn db_writer(
None
};
// check for NIP-05 verification
if nip05_enabled && validation.is_some() {
match validation.as_ref().unwrap() {
@ -198,7 +307,6 @@ pub async fn db_writer(
uv.name.to_string(),
event.get_author_prefix()
);
} else {
info!(
"rejecting event, author ({:?} / {:?}) verification invalid (expired/wrong domain)",
@ -214,7 +322,10 @@ pub async fn db_writer(
continue;
}
}
Err(Error::SqlError(rusqlite::Error::QueryReturnedNoRows)) => {
Err(
Error::SqlError(rusqlite::Error::QueryReturnedNoRows)
| Error::SqlxError(sqlx::Error::RowNotFound),
) => {
debug!(
"no verification records found for pubkey: {:?}",
event.get_author_prefix()
@ -235,27 +346,44 @@ pub async fn db_writer(
}
// nip05 address
let nip05_address : Option<crate::nip05::Nip05Name> = validation.and_then(|x| x.ok().map(|y| y.name));
let nip05_address: Option<crate::nip05::Nip05Name> =
validation.and_then(|x| x.ok().map(|y| y.name));
// GRPC check
if let Some(ref mut c) = grpc_client {
trace!("checking if grpc permits");
let grpc_start = Instant::now();
let decision_res = c.admit_event(&event, &subm_event.source_ip, subm_event.origin, subm_event.user_agent, nip05_address).await;
let decision_res = c
.admit_event(
&event,
&subm_event.source_ip,
subm_event.origin,
subm_event.user_agent,
nip05_address,
subm_event.auth_pubkey,
)
.await;
match decision_res {
Ok(decision) => {
if !decision.permitted() {
// GPRC returned a decision to reject this event
info!("GRPC rejected event: {:?} (kind: {}) from: {:?} in: {:?} (IP: {:?})",
event.get_event_id_prefix(),
event.kind,
event.get_author_prefix(),
grpc_start.elapsed(),
subm_event.source_ip);
notice_tx.try_send(Notice::blocked(event.id, &decision.message().unwrap_or_else(|| "".to_string()))).ok();
info!(
"GRPC rejected event: {:?} (kind: {}) from: {:?} in: {:?} (IP: {:?})",
event.get_event_id_prefix(),
event.kind,
event.get_author_prefix(),
grpc_start.elapsed(),
subm_event.source_ip
);
notice_tx
.try_send(Notice::blocked(
event.id,
&decision.message().unwrap_or_default(),
))
.ok();
continue;
}
},
}
Err(e) => {
warn!("GRPC server error: {:?}", e);
}
@ -273,6 +401,9 @@ pub async fn db_writer(
start.elapsed()
);
event_write = true;
// send OK message
notice_tx.try_send(Notice::saved(event.id)).ok();
} else {
match repo.write_event(&event).await {
Ok(updated) => {
@ -304,6 +435,17 @@ pub async fn db_writer(
// use rate limit, if defined, and if an event was actually written.
if event_write {
// If pay to relay is disabled or the cost per event is 0,
// there is no need to update the user balance
if pay_to_relay_enabled && cost_per_event > 0 {
// If the user balance is some, user was not on whitelist
// Their balance should be reduced by the cost per event
if let Some(_balance) = user_balance {
let pubkey = Keys::from_pk_str(&event.pubkey)?;
repo.update_account_balance(&pubkey, false, cost_per_event)
.await?;
}
}
if let Some(ref lim) = lim_opt {
if let Err(n) = lim.check() {
let wait_for = n.wait_time_from(clock.now());


@ -84,7 +84,8 @@ pub struct ConditionQuery {
}
impl ConditionQuery {
#[must_use] pub fn allows_event(&self, event: &Event) -> bool {
#[must_use]
pub fn allows_event(&self, event: &Event) -> bool {
// check each condition, to ensure that the event complies
// with the restriction.
for c in &self.conditions {
@ -101,7 +102,8 @@ impl ConditionQuery {
}
// Verify that the delegator approved the delegation; return a ConditionQuery if so.
#[must_use] pub fn validate_delegation(
#[must_use]
pub fn validate_delegation(
delegator: &str,
delegatee: &str,
cond_query: &str,
@ -144,7 +146,8 @@ pub struct Condition {
impl Condition {
/// Check if this condition allows the given event to be delegated
#[must_use] pub fn allows_event(&self, event: &Event) -> bool {
#[must_use]
pub fn allows_event(&self, event: &Event) -> bool {
// determine what the right-hand side of the operator is
let resolved_field = match &self.field {
Field::Kind => event.kind,


@ -68,6 +68,20 @@ pub enum Error {
AuthzError,
#[error("Tonic GRPC error")]
TonicError(tonic::Status),
#[error("Invalid AUTH message")]
AuthFailure,
#[error("I/O Error")]
IoError(std::io::Error),
#[error("Event builder error")]
EventError(nostr::event::builder::Error),
#[error("Nostr key error")]
NostrKeyError(nostr::key::Error),
#[error("Payment hash mismatch")]
PaymentHash,
#[error("Error parsing url")]
URLParseError(url::ParseError),
#[error("HTTP error")]
HTTPError(http::Error),
#[error("Unknown/Undocumented")]
UnknownError,
}
@ -143,3 +157,36 @@ impl From<tonic::Status> for Error {
Error::TonicError(r)
}
}
impl From<std::io::Error> for Error {
fn from(r: std::io::Error) -> Self {
Error::IoError(r)
}
}
impl From<nostr::event::builder::Error> for Error {
/// Wrap event builder error
fn from(r: nostr::event::builder::Error) -> Self {
Error::EventError(r)
}
}
impl From<nostr::key::Error> for Error {
/// Wrap nostr key error
fn from(r: nostr::key::Error) -> Self {
Error::NostrKeyError(r)
}
}
impl From<url::ParseError> for Error {
/// Wrap nostr key error
fn from(r: url::ParseError) -> Self {
Error::URLParseError(r)
}
}
impl From<http::Error> for Error {
/// Wrap nostr key error
fn from(r: http::Error) -> Self {
Error::HTTPError(r)
}
}


@ -1,7 +1,12 @@
//! Event parsing and validation
use crate::delegation::validate_delegation;
use crate::error::Error::{CommandUnknownError, EventCouldNotCanonicalize, EventInvalidId, EventInvalidSignature, EventMalformedPubkey};
use crate::error::Error::{
CommandUnknownError, EventCouldNotCanonicalize, EventInvalidId, EventInvalidSignature,
EventMalformedPubkey,
};
use crate::error::Result;
use crate::event::EventWrapper::WrappedAuth;
use crate::event::EventWrapper::WrappedEvent;
use crate::nip05;
use crate::utils::unix_time;
use bitcoin_hashes::{sha256, Hash};
@ -28,7 +33,8 @@ pub struct EventCmd {
}
impl EventCmd {
#[must_use] pub fn event_id(&self) -> &str {
#[must_use]
pub fn event_id(&self) -> &str {
&self.event.id
}
}
@ -65,7 +71,8 @@ where
}
/// Attempt to form a single-char tag name.
#[must_use] pub fn single_char_tagname(tagname: &str) -> Option<char> {
#[must_use]
pub fn single_char_tagname(tagname: &str) -> Option<char> {
// We return the tag character if and only if the tagname consists
// of a single char.
let mut tagnamechars = tagname.chars();
@ -83,17 +90,26 @@ where
}
}
pub enum EventWrapper {
WrappedEvent(Event),
WrappedAuth(Event),
}
/// Convert network event to parsed/validated event.
impl From<EventCmd> for Result<Event> {
fn from(ec: EventCmd) -> Result<Event> {
impl From<EventCmd> for Result<EventWrapper> {
fn from(ec: EventCmd) -> Result<EventWrapper> {
// ensure command is correct
if ec.cmd == "EVENT" {
ec.event.validate().map(|_| {
let mut e = ec.event;
e.build_index();
e.update_delegation();
e
WrappedEvent(e)
})
} else if ec.cmd == "AUTH" {
// we don't want to validate the event here, because NIP-42 can be disabled
// it will be validated later during the authentication process
Ok(WrappedAuth(ec.event))
} else {
Err(CommandUnknownError)
}
@ -102,7 +118,8 @@ impl From<EventCmd> for Result<Event> {
impl Event {
#[cfg(test)]
#[must_use] pub fn simple_event() -> Event {
#[must_use]
pub fn simple_event() -> Event {
Event {
id: "0".to_owned(),
pubkey: "0".to_owned(),
@ -116,41 +133,73 @@ impl Event {
}
}
#[must_use] pub fn is_kind_metadata(&self) -> bool {
#[must_use]
pub fn is_kind_metadata(&self) -> bool {
self.kind == 0
}
/// Should this event be persisted?
#[must_use] pub fn is_ephemeral(&self) -> bool {
#[must_use]
pub fn is_ephemeral(&self) -> bool {
self.kind >= 20000 && self.kind < 30000
}
/// Is this event currently expired?
pub fn is_expired(&self) -> bool {
if let Some(exp) = self.expiration() {
exp <= unix_time()
} else {
false
}
}
/// Determine the time at which this event should expire
pub fn expiration(&self) -> Option<u64> {
let default = "".to_string();
let dvals: Vec<&String> = self
.tags
.iter()
.filter(|x| !x.is_empty())
.filter(|x| x.first().unwrap() == "expiration")
.map(|x| x.get(1).unwrap_or(&default))
.take(1)
.collect();
let val_first = dvals.first();
val_first.and_then(|t| t.parse::<u64>().ok())
}
/// Should this event be replaced with newer timestamps from same author?
#[must_use] pub fn is_replaceable(&self) -> bool {
self.kind == 0 || self.kind == 3 || self.kind == 41 || (self.kind >= 10000 && self.kind < 20000)
#[must_use]
pub fn is_replaceable(&self) -> bool {
self.kind == 0
|| self.kind == 3
|| self.kind == 41
|| (self.kind >= 10000 && self.kind < 20000)
}
/// Should this event be replaced with newer timestamps from same author, for distinct `d` tag values?
#[must_use] pub fn is_param_replaceable(&self) -> bool {
#[must_use]
pub fn is_param_replaceable(&self) -> bool {
self.kind >= 30000 && self.kind < 40000
}
/// What is the replaceable `d` tag value?
/// Should this event be replaced with newer timestamps from same author, for distinct `d` tag values?
#[must_use] pub fn distinct_param(&self) -> Option<String> {
#[must_use]
pub fn distinct_param(&self) -> Option<String> {
if self.is_param_replaceable() {
let default = "".to_string();
let dvals:Vec<&String> = self.tags
let dvals: Vec<&String> = self
.tags
.iter()
.filter(|x| !x.is_empty())
.filter(|x| x.get(0).unwrap() == "d")
.map(|x| x.get(1).unwrap_or(&default)).take(1)
.filter(|x| x.first().unwrap() == "d")
.map(|x| x.get(1).unwrap_or(&default))
.take(1)
.collect();
let dval_first = dvals.get(0);
let dval_first = dvals.first();
match dval_first {
Some(_) => {dval_first.map(|x| x.to_string())},
None => Some(default)
Some(_) => dval_first.map(|x| x.to_string()),
None => Some(default),
}
} else {
None
@ -158,7 +207,8 @@ impl Event {
}
/// Pull a NIP-05 Name out of the event, if one exists
#[must_use] pub fn get_nip05_addr(&self) -> Option<nip05::Nip05Name> {
#[must_use]
pub fn get_nip05_addr(&self) -> Option<nip05::Nip05Name> {
if self.is_kind_metadata() {
// very quick check if we should attempt to parse this json
if self.content.contains("\"nip05\"") {
@ -175,15 +225,17 @@ impl Event {
// is this event delegated (properly)?
// does the signature match, and are conditions valid?
// if so, return an alternate author for the event
#[must_use] pub fn delegated_author(&self) -> Option<String> {
#[must_use]
pub fn delegated_author(&self) -> Option<String> {
// is there a delegation tag?
let delegation_tag: Vec<String> = self
.tags
.iter()
.filter(|x| x.len() == 4)
.filter(|x| x.get(0).unwrap() == "delegation")
.filter(|x| x.first().unwrap() == "delegation")
.take(1)
.next()?.clone(); // get first tag
.next()?
.clone(); // get first tag
//let delegation_tag = self.tag_values_by_name("delegation");
// delegation tags should have exactly 3 elements after the name (pubkey, condition, sig)
@ -225,7 +277,7 @@ impl Event {
let mut idx: HashMap<char, HashSet<String>> = HashMap::new();
// iterate over tags that have at least 2 elements
for t in self.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.get(0).unwrap();
let tagname = t.first().unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
@ -233,7 +285,7 @@ impl Event {
let tagnamechar = tagnamechar_opt.unwrap();
let tagval = t.get(1).unwrap();
// ensure a vector exists for this tag
idx.entry(tagnamechar).or_insert_with(HashSet::new);
idx.entry(tagnamechar).or_default();
// get the tag vec and insert entry
let idx_tag_vec = idx.get_mut(&tagnamechar).expect("could not get tag vector");
idx_tag_vec.insert(tagval.clone());
@ -243,24 +295,28 @@ impl Event {
}
/// Create a short event identifier, suitable for logging.
#[must_use] pub fn get_event_id_prefix(&self) -> String {
#[must_use]
pub fn get_event_id_prefix(&self) -> String {
self.id.chars().take(8).collect()
}
#[must_use] pub fn get_author_prefix(&self) -> String {
#[must_use]
pub fn get_author_prefix(&self) -> String {
self.pubkey.chars().take(8).collect()
}
/// Retrieve tag initial values across all tags matching the name
#[must_use] pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
#[must_use]
pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
self.tags
.iter()
.filter(|x| x.len() > 1)
.filter(|x| x.get(0).unwrap() == tag_name)
.filter(|x| x.first().unwrap() == tag_name)
.map(|x| x.get(1).unwrap().clone())
.collect()
}
#[must_use] pub fn is_valid_timestamp(&self, reject_future_seconds: Option<usize>) -> bool {
#[must_use]
pub fn is_valid_timestamp(&self, reject_future_seconds: Option<usize>) -> bool {
if let Some(allowable_future) = reject_future_seconds {
let curr_time = unix_time();
// calculate difference, plus how far future we allow
@ -299,7 +355,7 @@ impl Event {
return Err(EventInvalidId);
}
// * validate the message digest (sig) using the pubkey & computed sha256 message hash.
let sig = schnorr::Signature::from_str(&self.sig).unwrap();
let sig = schnorr::Signature::from_str(&self.sig).map_err(|_| EventInvalidSignature)?;
if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) {
if let Ok(pubkey) = XOnlyPublicKey::from_str(&self.pubkey) {
SECP.verify_schnorr(&sig, &msg, &pubkey)
@ -315,7 +371,7 @@ impl Event {
}
/// Convert event to canonical representation for signing.
fn to_canonical(&self) -> Option<String> {
pub fn to_canonical(&self) -> Option<String> {
// create a JsonValue for each event element
let mut c: Vec<Value> = vec![];
// id must be set to 0
@ -352,7 +408,8 @@ impl Event {
}
/// Determine if the given tag and value set intersect with tags in this event.
#[must_use] pub fn generic_tag_val_intersect(&self, tagname: char, check: &HashSet<String>) -> bool {
#[must_use]
pub fn generic_tag_val_intersect(&self, tagname: char, check: &HashSet<String>) -> bool {
match &self.tagidx {
// check if this is indexable tagname
Some(idx) => match idx.get(&tagname) {
@ -367,6 +424,22 @@ impl Event {
}
}
impl From<nostr::Event> for Event {
fn from(nostr_event: nostr::Event) -> Self {
Event {
id: nostr_event.id.to_hex(),
pubkey: nostr_event.pubkey.to_string(),
created_at: nostr_event.created_at.as_u64(),
kind: nostr_event.kind.as_u64(),
tags: nostr_event.tags.iter().map(|x| x.as_vec()).collect(),
content: nostr_event.content,
sig: nostr_event.sig.to_string(),
delegated_by: None,
tagidx: None,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
@ -391,7 +464,7 @@ mod tests {
fn empty_event_tag_match() {
let event = Event::simple_event();
assert!(!event
.generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
.generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
}
#[test]
@ -399,12 +472,11 @@ mod tests {
let mut event = Event::simple_event();
event.tags = vec![vec!["e".to_owned(), "foo".to_owned()]];
event.build_index();
assert_eq!(
assert!(
event.generic_tag_val_intersect(
'e',
&HashSet::from(["foo".to_owned(), "bar".to_owned()])
),
true
)
);
}
@ -539,28 +611,28 @@ mod tests {
#[test]
fn ephemeral_event() {
let mut event = Event::simple_event();
event.kind=20000;
event.kind = 20000;
assert!(event.is_ephemeral());
event.kind=29999;
event.kind = 29999;
assert!(event.is_ephemeral());
event.kind=30000;
event.kind = 30000;
assert!(!event.is_ephemeral());
event.kind=19999;
event.kind = 19999;
assert!(!event.is_ephemeral());
}
#[test]
fn replaceable_event() {
let mut event = Event::simple_event();
event.kind=0;
event.kind = 0;
assert!(event.is_replaceable());
event.kind=3;
event.kind = 3;
assert!(event.is_replaceable());
event.kind=10000;
event.kind = 10000;
assert!(event.is_replaceable());
event.kind=19999;
event.kind = 19999;
assert!(event.is_replaceable());
event.kind=20000;
event.kind = 20000;
assert!(!event.is_replaceable());
}
@ -582,8 +654,7 @@ mod tests {
// NIP case #1: "tags":[["d",""]]
let mut event = Event::simple_event();
event.kind = 30000;
event.tags = vec![
vec!["d".to_owned(), "".to_owned()]];
event.tags = vec![vec!["d".to_owned(), "".to_owned()]];
assert_eq!(event.distinct_param(), Some("".to_string()));
}
@ -600,8 +671,7 @@ mod tests {
// NIP case #3: "tags":[["d"]]: implicit empty value ""
let mut event = Event::simple_event();
event.kind = 30000;
event.tags = vec![
vec!["d".to_owned()]];
event.tags = vec![vec!["d".to_owned()]];
assert_eq!(event.distinct_param(), Some("".to_string()));
}
@ -612,7 +682,7 @@ mod tests {
event.kind = 30000;
event.tags = vec![
vec!["d".to_owned(), "".to_string()],
vec!["d".to_owned(), "not empty".to_string()]
vec!["d".to_owned(), "not empty".to_string()],
];
assert_eq!(event.distinct_param(), Some("".to_string()));
}
@ -625,7 +695,7 @@ mod tests {
event.kind = 30000;
event.tags = vec![
vec!["d".to_owned(), "not empty".to_string()],
vec!["d".to_owned(), "".to_string()]
vec!["d".to_owned(), "".to_string()],
];
assert_eq!(event.distinct_param(), Some("not empty".to_string()));
}
@ -638,7 +708,7 @@ mod tests {
event.tags = vec![
vec!["d".to_owned()],
vec!["d".to_owned(), "second value".to_string()],
vec!["d".to_owned(), "third value".to_string()]
vec!["d".to_owned(), "third value".to_string()],
];
assert_eq!(event.distinct_param(), Some("".to_string()));
}
@ -648,10 +718,77 @@ mod tests {
// NIP case #6: "tags":[["e"]]: same as no tags
let mut event = Event::simple_event();
event.kind = 30000;
event.tags = vec![
vec!["e".to_owned()],
];
event.tags = vec![vec!["e".to_owned()]];
assert_eq!(event.distinct_param(), Some("".to_string()));
}
#[test]
fn expiring_event_none() {
// regular events do not expire
let mut event = Event::simple_event();
event.kind = 7;
event.tags = vec![vec!["test".to_string(), "foo".to_string()]];
assert_eq!(event.expiration(), None);
}
#[test]
fn expiring_event_empty() {
// regular events do not expire
let mut event = Event::simple_event();
event.kind = 7;
event.tags = vec![vec!["expiration".to_string()]];
assert_eq!(event.expiration(), None);
}
#[test]
fn expiring_event_future() {
// a normal expiring event
let exp: u64 = 1676264138;
let mut event = Event::simple_event();
event.kind = 1;
event.tags = vec![vec!["expiration".to_string(), exp.to_string()]];
assert_eq!(event.expiration(), Some(exp));
}
#[test]
fn expiring_event_negative() {
// expiration set to a negative value (invalid)
let exp: i64 = -90;
let mut event = Event::simple_event();
event.kind = 1;
event.tags = vec![vec!["expiration".to_string(), exp.to_string()]];
assert_eq!(event.expiration(), None);
}
#[test]
fn expiring_event_zero() {
// a normal expiring event set to zero
let exp: i64 = 0;
let mut event = Event::simple_event();
event.kind = 1;
event.tags = vec![vec!["expiration".to_string(), exp.to_string()]];
assert_eq!(event.expiration(), Some(0));
}
#[test]
fn expiring_event_fraction() {
// expiration is fractional (invalid)
let exp: f64 = 23.334;
let mut event = Event::simple_event();
event.kind = 1;
event.tags = vec![vec!["expiration".to_string(), exp.to_string()]];
assert_eq!(event.expiration(), None);
}
#[test]
fn expiring_event_multiple() {
// multiple values, we just take the first
let mut event = Event::simple_event();
event.kind = 1;
event.tags = vec![
vec!["expiration".to_string(), (10).to_string()],
vec!["expiration".to_string(), (20).to_string()],
];
assert_eq!(event.expiration(), Some(10));
}
}


@ -1,158 +0,0 @@
//! Utilities for searching hexadecimal
use crate::utils::{is_hex};
use hex;
/// Types of hexadecimal queries.
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)]
pub enum HexSearch {
// when no range is needed, exact 32-byte
Exact(Vec<u8>),
// lower (inclusive) and upper range (exclusive)
Range(Vec<u8>, Vec<u8>),
// lower bound only, upper bound is MAX inclusive
LowerOnly(Vec<u8>),
}
/// Check if a string contains only f chars
fn is_all_fs(s: &str) -> bool {
s.chars().all(|x| x == 'f' || x == 'F')
}
/// Find the next hex sequence greater than the argument.
#[must_use] pub fn hex_range(s: &str) -> Option<HexSearch> {
let mut hash_base = s.to_owned();
if !is_hex(&hash_base) || hash_base.len() > 64 {
return None;
}
if hash_base.len() == 64 {
return Some(HexSearch::Exact(hex::decode(&hash_base).ok()?));
}
// if s is odd, add a zero
let mut odd = hash_base.len() % 2 != 0;
if odd {
// extend the string to make it even
hash_base.push('0');
}
let base = hex::decode(hash_base).ok()?;
// check for all ff's
if is_all_fs(s) {
// there is no higher bound, we only want to search for blobs greater than this.
return Some(HexSearch::LowerOnly(base));
}
// return a range
let mut upper = base.clone();
let mut byte_len = upper.len();
// for odd strings, we made them longer, but we want to increment the upper char (+16).
// we know we can do this without overflowing because we explicitly set the bottom half to 0's.
while byte_len > 0 {
byte_len -= 1;
// check if byte can be incremented, or if we need to carry.
let b = upper[byte_len];
if b == u8::MAX {
// reset and carry
upper[byte_len] = 0;
} else if odd {
// check if first char in this byte is NOT 'f'
if b < 240 {
// bump up the first character in this byte
upper[byte_len] = b + 16;
// increment done, stop iterating through the vec
break;
}
// if it is 'f', reset the byte to 0 and do a carry
// reset and carry
upper[byte_len] = 0;
// done with odd logic, so don't repeat this
odd = false;
} else {
// bump up the first character in this byte
upper[byte_len] = b + 1;
// increment done, stop iterating
break;
}
}
Some(HexSearch::Range(base, upper))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::error::Result;
#[test]
fn hex_range_exact() -> Result<()> {
let hex = "abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00";
let r = hex_range(hex);
assert_eq!(
r,
Some(HexSearch::Exact(hex::decode(hex).expect("invalid hex")))
);
Ok(())
}
#[test]
fn hex_full_range() -> Result<()> {
let hex = "aaaa";
let hex_upper = "aaab";
let r = hex_range(hex);
assert_eq!(
r,
Some(HexSearch::Range(
hex::decode(hex).expect("invalid hex"),
hex::decode(hex_upper).expect("invalid hex")
))
);
Ok(())
}
#[test]
fn hex_full_range_odd() -> Result<()> {
let r = hex_range("abc");
assert_eq!(
r,
Some(HexSearch::Range(
hex::decode("abc0").expect("invalid hex"),
hex::decode("abd0").expect("invalid hex")
))
);
Ok(())
}
#[test]
fn hex_full_range_odd_end_f() -> Result<()> {
let r = hex_range("abf");
assert_eq!(
r,
Some(HexSearch::Range(
hex::decode("abf0").expect("invalid hex"),
hex::decode("ac00").expect("invalid hex")
))
);
Ok(())
}
#[test]
fn hex_no_upper() -> Result<()> {
let r = hex_range("ffff");
assert_eq!(
r,
Some(HexSearch::LowerOnly(
hex::decode("ffff").expect("invalid hex")
))
);
Ok(())
}
#[test]
fn hex_no_upper_odd() -> Result<()> {
let r = hex_range("fff");
assert_eq!(
r,
Some(HexSearch::LowerOnly(
hex::decode("fff0").expect("invalid hex")
))
);
Ok(())
}
}


@ -1,9 +1,38 @@
//! Relay metadata using NIP-11
/// Relay Info
use crate::config;
use crate::config::Settings;
use serde::{Deserialize, Serialize};
pub const CARGO_PKG_VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
pub const UNIT: &str = "msats";
/// Limitations of the relay as specified in NIP-11
/// (This NIP extension isn't finalized so may change)
#[derive(Debug, Serialize, Deserialize)]
#[allow(unused)]
pub struct Limitation {
#[serde(skip_serializing_if = "Option::is_none")]
payment_required: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
restricted_writes: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug)]
#[allow(unused)]
pub struct Fees {
#[serde(skip_serializing_if = "Option::is_none")]
admission: Option<Vec<Fee>>,
#[serde(skip_serializing_if = "Option::is_none")]
publication: Option<Vec<Fee>>,
}
#[derive(Serialize, Deserialize, Debug)]
#[allow(unused)]
pub struct Fee {
amount: u64,
unit: String,
}
#[derive(Debug, Serialize, Deserialize)]
#[allow(unused)]
@ -19,25 +48,94 @@ pub struct RelayInfo {
#[serde(skip_serializing_if = "Option::is_none")]
pub contact: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub icon: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub supported_nips: Option<Vec<i64>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub software: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub limitation: Option<Limitation>,
#[serde(skip_serializing_if = "Option::is_none")]
pub payment_url: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub fees: Option<Fees>,
}
/// Convert an Info configuration into public Relay Info
impl From<config::Info> for RelayInfo {
fn from(i: config::Info) -> Self {
impl From<Settings> for RelayInfo {
fn from(c: Settings) -> Self {
let mut supported_nips = vec![1, 2, 9, 11, 12, 15, 16, 20, 22, 33, 40];
if c.authorization.nip42_auth {
supported_nips.push(42);
supported_nips.sort();
}
let i = c.info;
let p = c.pay_to_relay;
let limitations = Limitation {
payment_required: Some(p.enabled),
restricted_writes: Some(
p.enabled
|| c.verified_users.is_enabled()
|| c.authorization.pubkey_whitelist.is_some()
|| c.grpc.restricts_write,
),
};
let (payment_url, fees) = if p.enabled {
let admission_fee = if p.admission_cost > 0 {
Some(vec![Fee {
amount: p.admission_cost * 1000,
unit: UNIT.to_string(),
}])
} else {
None
};
let post_fee = if p.cost_per_event > 0 {
Some(vec![Fee {
amount: p.cost_per_event * 1000,
unit: UNIT.to_string(),
}])
} else {
None
};
let fees = Fees {
admission: admission_fee,
publication: post_fee,
};
let payment_url = if p.enabled && i.relay_url.is_some() {
Some(format!(
"{}join",
i.relay_url.clone().unwrap().replace("ws", "http")
))
} else {
None
};
(payment_url, Some(fees))
} else {
(None, None)
};
RelayInfo {
id: i.relay_url,
name: i.name,
description: i.description,
pubkey: i.pubkey,
contact: i.contact,
supported_nips: Some(vec![1, 2, 9, 11, 12, 15, 16, 20, 22, 33]),
supported_nips: Some(supported_nips),
software: Some("https://git.sr.ht/~gheartsfield/nostr-rs-relay".to_owned()),
version: CARGO_PKG_VERSION.map(std::borrow::ToOwned::to_owned),
limitation: Some(limitations),
payment_url,
fees,
icon: i.relay_icon,
}
}
}


@ -6,13 +6,13 @@ pub mod db;
pub mod delegation;
pub mod error;
pub mod event;
pub mod hexrange;
pub mod info;
pub mod nip05;
pub mod nauthz;
pub mod nip05;
pub mod notice;
pub mod repo;
pub mod subscription;
pub mod utils;
// Public API for creating relays programatically
// Public API for creating relays programmatically
pub mod payment;
pub mod server;


@ -1,13 +1,24 @@
//! Server process
use clap::Parser;
use console_subscriber::ConsoleLayer;
use nostr_rs_relay::cli::CLIArgs;
use nostr_rs_relay::config;
use nostr_rs_relay::server::start_server;
use std::fs;
use std::path::Path;
use std::process;
use std::sync::mpsc as syncmpsc;
use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
use std::thread;
#[cfg(not(target_env = "msvc"))]
use tikv_jemallocator::Jemalloc;
use tracing::info;
use console_subscriber::ConsoleLayer;
use tracing_appender::non_blocking::WorkerGuard;
use tracing_subscriber::EnvFilter;
#[cfg(not(target_env = "msvc"))]
#[global_allocator]
static GLOBAL: Jemalloc = Jemalloc;
/// Start running a Nostr relay server.
fn main() {
@ -16,9 +27,35 @@ fn main() {
// get config file name from args
let config_file_arg = args.config;
// Ensure the config file is readable if it was explicitly set
if let Some(config_path) = config_file_arg.as_ref() {
let path = Path::new(&config_path);
if !path.exists() {
eprintln!("Config file not found: {}", &config_path);
process::exit(1);
}
if !path.is_file() {
eprintln!("Invalid config file path: {}", &config_path);
process::exit(1);
}
if let Err(err) = fs::metadata(path) {
eprintln!("Error while accessing file metadata: {}", err);
process::exit(1);
}
if let Err(err) = fs::File::open(path) {
eprintln!("Config file is not readable: {}", err);
process::exit(1);
}
}
let mut _log_guard: Option<WorkerGuard> = None;
// configure settings from the config file (defaults to config.toml)
// replace default settings with those read from the config file
let mut settings = config::Settings::new(&config_file_arg);
let mut settings = config::Settings::new(&config_file_arg).unwrap_or_else(|e| {
eprintln!("Error reading config file ({:?})", e);
process::exit(1);
});
// setup tracing
if settings.diagnostics.tracing {
@ -26,7 +63,27 @@ fn main() {
ConsoleLayer::builder().with_default_env().init();
} else {
// standard logging
tracing_subscriber::fmt::try_init().unwrap();
if let Some(path) = &settings.logging.folder_path {
// write logs to a folder
let prefix = match &settings.logging.file_prefix {
Some(p) => p.as_str(),
None => "relay",
};
let file_appender = tracing_appender::rolling::daily(path, prefix);
let (non_blocking, guard) = tracing_appender::non_blocking(file_appender);
let filter = EnvFilter::from_default_env();
// assign to a variable that is not dropped till the program ends
_log_guard = Some(guard);
tracing_subscriber::fmt()
.with_env_filter(filter)
.with_writer(non_blocking)
.try_init()
.unwrap();
} else {
// write to stdout
tracing_subscriber::fmt::try_init().unwrap();
}
}
info!("Starting up from main");

View File

@ -35,13 +35,13 @@ impl std::convert::From<Nip05Name> for nauthz_grpc::event_request::Nip05Name {
fn from(value: Nip05Name) -> Self {
nauthz_grpc::event_request::Nip05Name {
local: value.local.clone(),
domain: value.domain.clone(),
domain: value.domain,
}
}
}
// conversion of event tags into grpc struct
fn tags_to_protobuf(tags: &Vec<Vec<String>>) -> Vec<TagEntry> {
fn tags_to_protobuf(tags: &[Vec<String>]) -> Vec<TagEntry> {
tags.iter()
.map(|x| TagEntry { values: x.clone() })
.collect()
@ -57,7 +57,7 @@ impl EventAuthzService {
eas
}
pub async fn ready_connection(self: &mut Self) {
pub async fn ready_connection(&mut self) {
if self.conn.is_none() {
let client = AuthorizationClient::connect(self.server_addr.to_string()).await;
if let Err(ref msg) = client {
@ -70,12 +70,13 @@ impl EventAuthzService {
}
pub async fn admit_event(
self: &mut Self,
&mut self,
event: &Event,
ip: &str,
origin: Option<String>,
user_agent: Option<String>,
nip05: Option<Nip05Name>,
auth_pubkey: Option<Vec<u8>>,
) -> Result<Box<dyn AuthzDecision>> {
self.ready_connection().await;
let id_blob = hex::decode(&event.id)?;
@ -97,14 +98,14 @@ impl EventAuthzService {
ip_addr: Some(ip.to_string()),
origin,
user_agent,
auth_pubkey: None,
nip05: nip05.map(|x| nauthz_grpc::event_request::Nip05Name::from(x)),
auth_pubkey,
nip05: nip05.map(nauthz_grpc::event_request::Nip05Name::from),
})
.await?;
let reply = svr_res.into_inner();
return Ok(Box::new(reply));
Ok(Box::new(reply))
} else {
return Err(Error::AuthzError);
Err(Error::AuthzError)
}
}
}
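A hypothetical call site (not in the diff; `authz`, `event`, and `auth_pubkey` are placeholders, and it assumes `AuthzDecision` exposes a `permitted()` accessor as elsewhere in this crate), showing the new `auth_pubkey` argument being forwarded:
// Sketch: consult the external authorization server for an event.
let decision = authz
    .admit_event(&event, "127.0.0.1", None, None, None, auth_pubkey)
    .await?;
if !decision.permitted() {
    // reject the event, relaying any server-supplied message
}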

View File

@ -8,11 +8,11 @@ use crate::config::VerifiedUsers;
use crate::error::{Error, Result};
use crate::event::Event;
use crate::repo::NostrRepo;
use std::sync::Arc;
use hyper::body::HttpBody;
use hyper::client::connect::HttpConnector;
use hyper::Client;
use hyper_tls::HttpsConnector;
use hyper_rustls::HttpsConnector;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use std::time::SystemTime;
@ -48,7 +48,8 @@ pub struct Nip05Name {
impl Nip05Name {
/// Does this name represent the entire domain?
#[must_use] pub fn is_domain_only(&self) -> bool {
#[must_use]
pub fn is_domain_only(&self) -> bool {
self.local == "_"
}
@ -58,8 +59,8 @@ impl Nip05Name {
"https://{}/.well-known/nostr.json?name={}",
self.domain, self.local
)
.parse::<http::Uri>()
.ok()
.parse::<http::Uri>()
.ok()
}
}
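A small example of the URL construction above (hypothetical name; not part of the diff):
let name = Nip05Name::try_from("bob@example.com")?;
assert!(!name.is_domain_only());
assert_eq!(
    name.to_url().unwrap().to_string(),
    "https://example.com/.well-known/nostr.json?name=bob"
);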
@ -73,7 +74,10 @@ impl std::convert::TryFrom<&str> for Nip05Name {
// check if local name is valid
let local = components[0];
let domain = components[1];
if local.chars().all(|x| x.is_alphanumeric() || x == '_' || x == '-' || x == '.') {
if local
.chars()
.all(|x| x.is_alphanumeric() || x == '_' || x == '-' || x == '.')
{
if domain
.chars()
.all(|x| x.is_alphanumeric() || x == '-' || x == '.')
@ -129,7 +133,12 @@ impl Verifier {
) -> Result<Self> {
info!("creating NIP-05 verifier");
// setup hyper client
let https = HttpsConnector::new();
let https = hyper_rustls::HttpsConnectorBuilder::new()
.with_native_roots()
.https_or_http()
.enable_http1()
.build();
let client = Client::builder().build::<_, hyper::Body>(https);
// After all accounts have been re-verified, don't check again
@ -261,10 +270,10 @@ impl Verifier {
Err(Error::ChannelClosed) => {
// channel was closed, we are shutting down
return;
},
}
Err(e) => {
info!("error in verifier: {:?}", e);
},
info!("error in verifier: {:?}", e);
}
_ => {}
}
}
@ -349,42 +358,41 @@ impl Verifier {
UserWebVerificationStatus::Verified => {
// freshly verified account, update the
// timestamp.
self.repo.update_verification_timestamp(v.rowid)
.await?;
self.repo.update_verification_timestamp(v.rowid).await?;
info!("verification updated for {}", v.to_string());
}
UserWebVerificationStatus::DomainNotAllowed
| UserWebVerificationStatus::Unknown => {
// server may be offline, or temporarily
// blocked by the config file. Note the
// failure so we can process something
// else.
| UserWebVerificationStatus::Unknown => {
// server may be offline, or temporarily
// blocked by the config file. Note the
// failure so we can process something
// else.
// have we had enough failures to give up?
if v.failure_count >= max_failures as u64 {
info!(
"giving up on verifying {:?} after {} failures",
v.name, v.failure_count
);
self.repo.delete_verification(v.rowid)
.await?;
} else {
// record normal failure, incrementing failure count
info!("verification failed for {}", v.to_string());
self.repo.fail_verification(v.rowid).await?;
}
}
// have we had enough failures to give up?
if v.failure_count >= max_failures as u64 {
info!(
"giving up on verifying {:?} after {} failures",
v.name, v.failure_count
);
self.repo.delete_verification(v.rowid).await?;
} else {
// record normal failure, incrementing failure count
info!("verification failed for {}", v.to_string());
self.repo.fail_verification(v.rowid).await?;
}
}
UserWebVerificationStatus::Unverified => {
// domain has removed the verification, drop
// the record on our side.
info!("verification rescinded for {}", v.to_string());
self.repo.delete_verification(v.rowid)
.await?;
self.repo.delete_verification(v.rowid).await?;
}
}
}
Err(Error::SqlError(rusqlite::Error::QueryReturnedNoRows)) => {
Err(
Error::SqlError(rusqlite::Error::QueryReturnedNoRows)
| Error::SqlxError(sqlx::Error::RowNotFound),
) => {
// No users need verification. Reset the interval to
// the next verification attempt.
let start = tokio::time::Instant::now() + self.wait_after_finish;
@ -433,7 +441,9 @@ impl Verifier {
}
}
// write the verification record
self.repo.create_verification_record(&event.id, name).await?;
self.repo
.create_verification_record(&event.id, name)
.await?;
Ok(())
}
}
@ -463,7 +473,8 @@ pub struct VerificationRecord {
/// Check with settings to determine if a given domain is allowed to
/// publish.
#[must_use] pub fn is_domain_allowed(
#[must_use]
pub fn is_domain_allowed(
domain: &str,
whitelist: &Option<Vec<String>>,
blacklist: &Option<Vec<String>>,
@ -483,7 +494,8 @@ pub struct VerificationRecord {
impl VerificationRecord {
/// Check if the record is recent enough to be considered valid,
/// and the domain is allowed.
#[must_use] pub fn is_valid(&self, verified_users_settings: &VerifiedUsers) -> bool {
#[must_use]
pub fn is_valid(&self, verified_users_settings: &VerifiedUsers) -> bool {
//let settings = SETTINGS.read().unwrap();
// how long a verification record is good for
let nip05_expiration = &verified_users_settings.verify_expiration_duration;

View File

@ -5,6 +5,7 @@ pub enum EventResultStatus {
Blocked,
RateLimited,
Error,
Restricted,
}
pub struct EventResult {
@ -16,17 +17,20 @@ pub struct EventResult {
pub enum Notice {
Message(String),
EventResult(EventResult),
AuthChallenge(String),
}
impl EventResultStatus {
#[must_use] pub fn to_bool(&self) -> bool {
#[must_use]
pub fn to_bool(&self) -> bool {
match self {
Self::Duplicate | Self::Saved => true,
Self::Invalid |Self::Blocked | Self::RateLimited | Self::Error => false,
Self::Invalid | Self::Blocked | Self::RateLimited | Self::Error | Self::Restricted => false,
}
}
#[must_use] pub fn prefix(&self) -> &'static str {
#[must_use]
pub fn prefix(&self) -> &'static str {
match self {
Self::Saved => "saved",
Self::Duplicate => "duplicate",
@ -34,6 +38,7 @@ impl EventResultStatus {
Self::Blocked => "blocked",
Self::RateLimited => "rate-limited",
Self::Error => "error",
Self::Restricted => "restricted",
}
}
}
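A quick sketch of how the new variant behaves, derived from the match arms above:
let status = EventResultStatus::Restricted;
assert!(!status.to_bool()); // restricted writes are not successes
assert_eq!(status.prefix(), "restricted"); // OK-message prefix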
@ -43,7 +48,8 @@ impl Notice {
// Notice::err_msg(format!("{}", err), id)
//}
#[must_use] pub fn message(msg: String) -> Notice {
#[must_use]
pub fn message(msg: String) -> Notice {
Notice::Message(msg)
}
@ -52,27 +58,38 @@ impl Notice {
Notice::EventResult(EventResult { id, msg, status })
}
#[must_use] pub fn invalid(id: String, msg: &str) -> Notice {
#[must_use]
pub fn invalid(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::Invalid)
}
#[must_use] pub fn blocked(id: String, msg: &str) -> Notice {
#[must_use]
pub fn blocked(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::Blocked)
}
#[must_use] pub fn rate_limited(id: String, msg: &str) -> Notice {
#[must_use]
pub fn rate_limited(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::RateLimited)
}
#[must_use] pub fn duplicate(id: String) -> Notice {
#[must_use]
pub fn duplicate(id: String) -> Notice {
Notice::prefixed(id, "", EventResultStatus::Duplicate)
}
#[must_use] pub fn error(id: String, msg: &str) -> Notice {
#[must_use]
pub fn error(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::Error)
}
#[must_use] pub fn saved(id: String) -> Notice {
#[must_use]
pub fn restricted(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::Restricted)
}
#[must_use]
pub fn saved(id: String) -> Notice {
Notice::EventResult(EventResult {
id,
msg: "".into(),

src/payment/lnbits.rs (new file, 176 lines)
View File

@ -0,0 +1,176 @@
//! LNBits payment processor
use http::Uri;
use hyper::client::connect::HttpConnector;
use hyper::Client;
use hyper_rustls::HttpsConnector;
use nostr::Keys;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use async_trait::async_trait;
use rand::Rng;
use std::str::FromStr;
use url::Url;
use crate::{config::Settings, error::Error};
use super::{InvoiceInfo, InvoiceStatus, PaymentProcessor};
const APIPATH: &str = "/api/v1/payments/";
/// Info LNBits expects in create invoice request
#[derive(Serialize, Deserialize, Debug)]
pub struct LNBitsCreateInvoice {
out: bool,
amount: u64,
memo: String,
webhook: String,
unit: String,
internal: bool,
expiry: u64,
}
/// Invoice response for LNBits
#[derive(Debug, Serialize, Deserialize)]
pub struct LNBitsCreateInvoiceResponse {
payment_hash: String,
payment_request: String,
}
/// LNBits callback response.
/// Used when an invoice is paid:
/// LNBits posts the status change to the relay
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LNBitsCallback {
pub checking_id: String,
pub pending: bool,
pub amount: u64,
pub memo: String,
pub time: u64,
pub bolt11: String,
pub preimage: String,
pub payment_hash: String,
pub wallet_id: String,
pub webhook: String,
pub webhook_status: Option<String>,
}
/// LNBits response for the check-invoice endpoint
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LNBitsCheckInvoiceResponse {
paid: bool,
}
#[derive(Clone)]
pub struct LNBitsPaymentProcessor {
/// HTTP client
client: hyper::Client<HttpsConnector<HttpConnector>, hyper::Body>,
settings: Settings,
}
impl LNBitsPaymentProcessor {
pub fn new(settings: &Settings) -> Self {
// setup hyper client
let https = hyper_rustls::HttpsConnectorBuilder::new()
.with_native_roots()
.https_only()
.enable_http1()
.build();
let client = Client::builder().build::<_, hyper::Body>(https);
Self {
client,
settings: settings.clone(),
}
}
}
#[async_trait]
impl PaymentProcessor for LNBitsPaymentProcessor {
/// Calls the LNBits API to get a new invoice
async fn get_invoice(&self, key: &Keys, amount: u64) -> Result<InvoiceInfo, Error> {
let random_number: u16 = rand::thread_rng().gen();
let memo = format!("{}: {}", random_number, key.public_key());
let callback_url = Url::parse(
&self
.settings
.info
.relay_url
.clone()
.unwrap()
.replace("ws", "http"),
)?
.join("lnbits")?;
let body = LNBitsCreateInvoice {
out: false,
amount,
memo: memo.clone(),
webhook: callback_url.to_string(),
unit: "sat".to_string(),
internal: false,
expiry: 3600,
};
let url = Url::parse(&self.settings.pay_to_relay.node_url)?.join(APIPATH)?;
let uri = Uri::from_str(url.as_str().strip_suffix('/').unwrap_or(url.as_str())).unwrap();
let req = hyper::Request::builder()
.method(hyper::Method::POST)
.uri(uri)
.header("X-Api-Key", &self.settings.pay_to_relay.api_secret)
.body(hyper::Body::from(serde_json::to_string(&body)?))
.expect("request builder");
let res = self.client.request(req).await?;
// deserialize the LNBits create-invoice response
let body = hyper::body::to_bytes(res.into_body()).await?;
let invoice_response: LNBitsCreateInvoiceResponse = serde_json::from_slice(&body)?;
Ok(InvoiceInfo {
pubkey: key.public_key().to_string(),
payment_hash: invoice_response.payment_hash,
bolt11: invoice_response.payment_request,
amount,
memo,
status: InvoiceStatus::Unpaid,
confirmed_at: None,
})
}
/// Calls the LNBits API to check the payment status of an invoice
async fn check_invoice(&self, payment_hash: &str) -> Result<InvoiceStatus, Error> {
let url = Url::parse(&self.settings.pay_to_relay.node_url)?
.join(APIPATH)?
.join(payment_hash)?;
let uri = Uri::from_str(url.as_str()).unwrap();
let req = hyper::Request::builder()
.method(hyper::Method::GET)
.uri(uri)
.header("X-Api-Key", &self.settings.pay_to_relay.api_secret)
.body(hyper::Body::empty())
.expect("request builder");
let res = self.client.request(req).await?;
// deserialize the LNBits check-invoice response
let body = hyper::body::to_bytes(res.into_body()).await?;
let invoice_response: Value = serde_json::from_slice(&body)?;
let status = if let Ok(invoice_response) =
serde_json::from_value::<LNBitsCheckInvoiceResponse>(invoice_response)
{
if invoice_response.paid {
InvoiceStatus::Paid
} else {
InvoiceStatus::Unpaid
}
} else {
InvoiceStatus::Expired
};
Ok(status)
}
}
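A hypothetical usage sketch (the amount and `keys` are placeholders): create the processor from settings and request an invoice.
// Sketch: request a 1000-sat invoice for a pubkey.
let processor = LNBitsPaymentProcessor::new(&settings);
let invoice = processor.get_invoice(&keys, 1000).await?;
println!("pay {} (hash: {})", invoice.bolt11, invoice.payment_hash);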

src/payment/mod.rs (new file, 276 lines)
View File

@ -0,0 +1,276 @@
use crate::error::{Error, Result};
use crate::event::Event;
use crate::payment::lnbits::LNBitsPaymentProcessor;
use crate::repo::NostrRepo;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tracing::{info, warn};
use async_trait::async_trait;
use nostr::key::{FromPkStr, FromSkStr};
use nostr::{key::Keys, Event as NostrEvent, EventBuilder};
pub mod lnbits;
/// Payment handler
pub struct Payment {
/// Repository for saving/retrieving events and payment records
repo: Arc<dyn NostrRepo>,
/// Newly validated events get written and then broadcast on this channel to subscribers
event_tx: tokio::sync::broadcast::Sender<Event>,
/// Payment message sender
payment_tx: tokio::sync::broadcast::Sender<PaymentMessage>,
/// Payment message receiver
payment_rx: tokio::sync::broadcast::Receiver<PaymentMessage>,
/// Settings
settings: crate::config::Settings,
// Nostr Keys
nostr_keys: Option<Keys>,
/// Payment Processor
processor: Arc<dyn PaymentProcessor>,
}
#[async_trait]
pub trait PaymentProcessor: Send + Sync {
/// Get invoice from processor
async fn get_invoice(&self, keys: &Keys, amount: u64) -> Result<InvoiceInfo, Error>;
/// Check payment status of an invoice
async fn check_invoice(&self, payment_hash: &str) -> Result<InvoiceStatus, Error>;
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub enum Processor {
LNBits,
}
/// Possible states of an invoice
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, sqlx::Type)]
#[sqlx(type_name = "status")]
pub enum InvoiceStatus {
Unpaid,
Paid,
Expired,
}
impl ToString for InvoiceStatus {
fn to_string(&self) -> String {
match self {
InvoiceStatus::Paid => "Paid".to_string(),
InvoiceStatus::Unpaid => "Unpaid".to_string(),
InvoiceStatus::Expired => "Expired".to_string(),
}
}
}
/// Invoice information
#[derive(Debug, Clone)]
pub struct InvoiceInfo {
pub pubkey: String,
pub payment_hash: String,
pub bolt11: String,
pub amount: u64,
pub status: InvoiceStatus,
pub memo: String,
pub confirmed_at: Option<u64>,
}
/// Message variants for the payment channel
#[derive(Debug, Clone)]
pub enum PaymentMessage {
/// New account
NewAccount(String),
/// Check account
CheckAccount(String),
/// Account Admitted
AccountAdmitted(String),
/// Invoice generated
Invoice(String, InvoiceInfo),
/// Invoice callback;
/// the payment hash is passed
// This may have to be changed to better support other processors
InvoicePaid(String),
}
impl Payment {
pub fn new(
repo: Arc<dyn NostrRepo>,
payment_tx: tokio::sync::broadcast::Sender<PaymentMessage>,
payment_rx: tokio::sync::broadcast::Receiver<PaymentMessage>,
event_tx: tokio::sync::broadcast::Sender<Event>,
settings: crate::config::Settings,
) -> Result<Self> {
info!("Create payment handler");
// Create nostr key from sk string
let nostr_keys = if let Some(secret_key) = &settings.pay_to_relay.secret_key {
Some(Keys::from_sk_str(secret_key)?)
} else {
None
};
// Create processor kind defined in settings
let processor = match &settings.pay_to_relay.processor {
Processor::LNBits => Arc::new(LNBitsPaymentProcessor::new(&settings)),
};
Ok(Payment {
repo,
payment_tx,
payment_rx,
event_tx,
settings,
nostr_keys,
processor,
})
}
/// Perform Payment tasks
pub async fn run(&mut self) {
loop {
let res = self.run_internal().await;
if let Err(e) = res {
info!("error in payment: {:?}", e);
}
}
}
/// Internal select loop for performing payment operations
async fn run_internal(&mut self) -> Result<()> {
tokio::select! {
m = self.payment_rx.recv() => {
match m {
Ok(PaymentMessage::NewAccount(pubkey)) => {
info!("payment event for {:?}", pubkey);
// REVIEW: This will need to change for cost per event
let amount = self.settings.pay_to_relay.admission_cost;
let invoice_info = self.get_invoice_info(&pubkey, amount).await?;
// TODO: should handle this error
self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok();
},
// Gets the most recent unpaid invoice from the database,
// then checks LNBits to verify whether it is paid or unpaid
Ok(PaymentMessage::CheckAccount(pubkey)) => {
let keys = Keys::from_pk_str(&pubkey)?;
if let Ok(Some(invoice_info)) = self.repo.get_unpaid_invoice(&keys).await {
match self.check_invoice_status(&invoice_info.payment_hash).await? {
InvoiceStatus::Paid => {
self.repo.admit_account(&keys, self.settings.pay_to_relay.admission_cost).await?;
self.payment_tx.send(PaymentMessage::AccountAdmitted(pubkey)).ok();
}
_ => {
self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok();
}
}
} else {
let amount = self.settings.pay_to_relay.admission_cost;
let invoice_info = self.get_invoice_info(&pubkey, amount).await?;
self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok();
}
}
Ok(PaymentMessage::InvoicePaid(payment_hash)) => {
if self.check_invoice_status(&payment_hash).await?.eq(&InvoiceStatus::Paid) {
let pubkey = self.repo
.update_invoice(&payment_hash, InvoiceStatus::Paid)
.await?;
let key = Keys::from_pk_str(&pubkey)?;
self.repo.admit_account(&key, self.settings.pay_to_relay.admission_cost).await?;
}
}
Ok(_) => {
// For this variant nothing needs to be done here;
// it is handled by `server`
}
Err(err) => warn!("Payment RX: {err}")
}
}
}
Ok(())
}
/// Sends a Nostr DM to the pubkey that requested the invoice:
/// two events, the terms of service followed by the bolt11 invoice
pub async fn send_admission_message(
&self,
pubkey: &str,
invoice_info: &InvoiceInfo,
) -> Result<()> {
let nostr_keys = match &self.nostr_keys {
Some(key) => key,
None => return Err(Error::CustomError("Nostr key not defined".to_string())),
};
// Create Nostr key from pk
let key = Keys::from_pk_str(pubkey)?;
let pubkey = key.public_key();
// Event DM with terms of service
let message_event: NostrEvent = EventBuilder::new_encrypted_direct_msg(
nostr_keys,
pubkey,
&self.settings.pay_to_relay.terms_message,
)?
.to_event(nostr_keys)?;
// Event DM with invoice
let invoice_event: NostrEvent =
EventBuilder::new_encrypted_direct_msg(nostr_keys, pubkey, &invoice_info.bolt11)?
.to_event(nostr_keys)?;
// Persist DM events to DB
self.repo.write_event(&message_event.clone().into()).await?;
self.repo.write_event(&invoice_event.clone().into()).await?;
// Broadcast DM events
self.event_tx.send(message_event.clone().into()).ok();
self.event_tx.send(invoice_event.clone().into()).ok();
Ok(())
}
/// Get Invoice Info
/// If the pubkey has an active invoice, that is returned;
/// otherwise a new invoice is generated by the payment processor
pub async fn get_invoice_info(&self, pubkey: &str, amount: u64) -> Result<InvoiceInfo> {
// If the user is already in the DB this will be false.
// This avoids recreating admission invoices;
// DMs carrying the same invoice will keep being sent
// if the client continues trying to write to the relay.
let key = Keys::from_pk_str(pubkey)?;
if !self.repo.create_account(&key).await? {
if let Ok(Some(invoice_info)) = self.repo.get_unpaid_invoice(&key).await {
return Ok(invoice_info);
}
}
let key = Keys::from_pk_str(pubkey)?;
let invoice_info = self.processor.get_invoice(&key, amount).await?;
// Persist invoice to DB
self.repo
.create_invoice_record(&key, invoice_info.clone())
.await?;
if self.settings.pay_to_relay.direct_message {
// Admission event invoice and terms to pubkey that is joining
self.send_admission_message(pubkey, &invoice_info).await?;
}
Ok(invoice_info)
}
/// Check paid status of invoice with LNbits
pub async fn check_invoice_status(&self, payment_hash: &str) -> Result<InvoiceStatus, Error> {
// Check with the processor whether the invoice is paid or past its expiry time
let status = self.processor.check_invoice(payment_hash).await?;
self.repo
.update_invoice(payment_hash, status.clone())
.await?;
Ok(status)
}
}
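A sketch of how the handler might be wired up (assumed; `repo`, `event_tx`, `settings`, and `pubkey` are placeholders mirroring how the server would start it):
// Sketch: create channels, spawn the payment loop, request an invoice.
let (pay_tx, pay_rx) = tokio::sync::broadcast::channel(64);
let mut payment = Payment::new(
    repo.clone(), pay_tx.clone(), pay_rx, event_tx.clone(), settings.clone(),
)?;
tokio::spawn(async move { payment.run().await });
pay_tx.send(PaymentMessage::NewAccount(pubkey.to_string())).ok();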

View File

@ -2,15 +2,17 @@ use crate::db::QueryResult;
use crate::error::Result;
use crate::event::Event;
use crate::nip05::VerificationRecord;
use crate::payment::{InvoiceInfo, InvoiceStatus};
use crate::subscription::Subscription;
use crate::utils::unix_time;
use async_trait::async_trait;
use nostr::Keys;
use rand::Rng;
pub mod sqlite;
pub mod sqlite_migration;
pub mod postgres;
pub mod postgres_migration;
pub mod sqlite;
pub mod sqlite_migration;
#[async_trait]
pub trait NostrRepo: Send + Sync {
@ -57,6 +59,33 @@ pub trait NostrRepo: Send + Sync {
/// Get oldest verification before timestamp
async fn get_oldest_user_verification(&self, before: u64) -> Result<VerificationRecord>;
/// Create a new account
async fn create_account(&self, pubkey: &Keys) -> Result<bool>;
/// Admit an account
async fn admit_account(&self, pubkey: &Keys, admission_cost: u64) -> Result<()>;
/// Gets user balance if they are an admitted pubkey
async fn get_account_balance(&self, pubkey: &Keys) -> Result<(bool, u64)>;
/// Update account balance
async fn update_account_balance(
&self,
pub_key: &Keys,
positive: bool,
new_balance: u64,
) -> Result<()>;
/// Create invoice record
async fn create_invoice_record(&self, pubkey: &Keys, invoice_info: InvoiceInfo) -> Result<()>;
/// Update Invoice for given payment hash
async fn update_invoice(&self, payment_hash: &str, status: InvoiceStatus) -> Result<String>;
/// Get the most recent invoice for a given pubkey
/// invoice must be unpaid and not expired
async fn get_unpaid_invoice(&self, pubkey: &Keys) -> Result<Option<InvoiceInfo>>;
}
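A minimal sketch (assumed context; `Result` is this crate's alias) of driving the new account/invoice methods through the trait object, as the payment handler does:
async fn demo(repo: std::sync::Arc<dyn NostrRepo>, keys: &nostr::Keys) -> Result<()> {
    if !repo.create_account(keys).await? {
        // account already existed; re-use any open invoice
        if let Some(invoice) = repo.get_unpaid_invoice(keys).await? {
            println!("unpaid invoice: {}", invoice.bolt11);
        }
    }
    Ok(())
}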
// Current time, with a slight forward jitter in seconds

View File

@ -2,57 +2,96 @@ use crate::db::QueryResult;
use crate::error::Result;
use crate::event::{single_char_tagname, Event};
use crate::nip05::{Nip05Name, VerificationRecord};
use crate::payment::{InvoiceInfo, InvoiceStatus};
use crate::repo::{now_jitter, NostrRepo};
use crate::subscription::{ReqFilter, Subscription};
use async_std::stream::StreamExt;
use async_trait::async_trait;
use chrono::{DateTime, TimeZone, Utc};
use sqlx::postgres::PgRow;
use sqlx::Error::RowNotFound;
use sqlx::{Error, Execute, FromRow, Postgres, QueryBuilder, Row};
use std::time::{Duration, Instant};
use sqlx::Error::RowNotFound;
use crate::hexrange::{hex_range, HexSearch};
use crate::error;
use crate::repo::postgres_migration::run_migrations;
use crate::server::NostrMetrics;
use crate::utils::{is_hex, is_lower_hex};
use crate::utils::{self, is_hex, is_lower_hex};
use nostr::key::Keys;
use tokio::sync::mpsc::Sender;
use tokio::sync::oneshot::Receiver;
use tracing::log::trace;
use tracing::{debug, error, info};
use crate::error;
use tracing::{debug, error, info, trace, warn};
pub type PostgresPool = sqlx::pool::Pool<Postgres>;
pub struct PostgresRepo {
conn: PostgresPool,
conn_write: PostgresPool,
metrics: NostrMetrics,
}
impl PostgresRepo {
pub fn new(c: PostgresPool, m: NostrMetrics) -> PostgresRepo {
pub fn new(c: PostgresPool, cw: PostgresPool, m: NostrMetrics) -> PostgresRepo {
PostgresRepo {
conn: c,
conn_write: cw,
metrics: m,
}
}
}
/// Cleanup expired events on a regular basis
async fn cleanup_expired(conn: PostgresPool, frequency: Duration) -> Result<()> {
tokio::task::spawn(async move {
loop {
tokio::select! {
_ = tokio::time::sleep(frequency) => {
let start = Instant::now();
let exp_res = delete_expired(conn.clone()).await;
match exp_res {
Ok(exp_count) => {
if exp_count > 0 {
info!("removed {} expired events in: {:?}", exp_count, start.elapsed());
}
},
Err(e) => {
warn!("could not remove expired events due to error: {:?}", e);
}
}
}
};
}
});
Ok(())
}
/// One-time deletion of all expired events
async fn delete_expired(conn: PostgresPool) -> Result<u64> {
let mut tx = conn.begin().await?;
let update_count = sqlx::query("DELETE FROM \"event\" WHERE expires_at <= $1;")
.bind(Utc.timestamp_opt(utils::unix_time() as i64, 0).unwrap())
.execute(&mut tx)
.await?
.rows_affected();
tx.commit().await?;
Ok(update_count)
}
#[async_trait]
impl NostrRepo for PostgresRepo {
async fn start(&self) -> Result<()> {
info!("not implemented");
Ok(())
// begin a cleanup task for expired events.
cleanup_expired(self.conn_write.clone(), Duration::from_secs(600)).await?;
Ok(())
}
async fn migrate_up(&self) -> Result<usize> {
Ok(run_migrations(&self.conn).await?)
Ok(run_migrations(&self.conn_write).await?)
}
async fn write_event(&self, e: &Event) -> Result<u64> {
// start transaction
let mut tx = self.conn.begin().await?;
let mut tx = self.conn_write.begin().await?;
let start = Instant::now();
// get relevant fields from event and convert to blobs.
@ -66,7 +105,7 @@ impl NostrRepo for PostgresRepo {
// replaceable event or parameterized replaceable event.
if e.is_replaceable() {
let repl_count = sqlx::query(
"SELECT e.id FROM event e WHERE e.pub_key=? AND e.kind=? AND e.created_at >= ? LIMIT 1;")
"SELECT e.id FROM event e WHERE e.pub_key=$1 AND e.kind=$2 AND e.created_at >= $3 LIMIT 1;")
.bind(&pubkey_blob)
.bind(e.kind as i64)
.bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
@ -77,7 +116,7 @@ impl NostrRepo for PostgresRepo {
}
}
if let Some(d_tag) = e.distinct_param() {
let repl_count:i64 = if is_lower_hex(&d_tag) && (d_tag.len() % 2 == 0) {
let repl_count: i64 = if is_lower_hex(&d_tag) && (d_tag.len() % 2 == 0) {
sqlx::query_scalar(
"SELECT count(*) AS count FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.pub_key=$1 AND e.kind=$2 AND t.name='d' AND t.value_hex=$3 AND e.created_at >= $4 LIMIT 1;")
.bind(hex::decode(&e.pubkey).ok())
@ -100,25 +139,29 @@ impl NostrRepo for PostgresRepo {
// the same author/kind/tag value exist, and we can ignore
// this event.
if repl_count > 0 {
return Ok(0)
return Ok(0);
}
}
// ignore if the event hash is a duplicate.
let mut ins_count = sqlx::query(
r#"INSERT INTO "event"
(id, pub_key, created_at, kind, "content", delegated_by)
VALUES($1, $2, $3, $4, $5, $6)
(id, pub_key, created_at, expires_at, kind, "content", delegated_by)
VALUES($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (id) DO NOTHING"#,
)
.bind(&id_blob)
.bind(&pubkey_blob)
.bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
.bind(e.kind as i64)
.bind(event_str.into_bytes())
.bind(delegator_blob)
.execute(&mut tx)
.await?
.rows_affected();
.bind(&id_blob)
.bind(&pubkey_blob)
.bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
.bind(
e.expiration()
.and_then(|x| Utc.timestamp_opt(x as i64, 0).latest()),
)
.bind(e.kind as i64)
.bind(event_str.into_bytes())
.bind(delegator_blob)
.execute(&mut tx)
.await?
.rows_affected();
if ins_count == 0 {
// if the event was a duplicate, no need to insert event or
@ -134,25 +177,27 @@ ON CONFLICT (id) DO NOTHING"#,
let tag_val = &tag[1];
// only single-char tags are searchable
let tag_char_opt = single_char_tagname(tag_name);
let query = "INSERT INTO tag (event_id, \"name\", value) VALUES($1, $2, $3) \
ON CONFLICT (event_id, \"name\", value) DO NOTHING";
match &tag_char_opt {
Some(_) => {
// if tag value is lowercase hex;
if is_lower_hex(tag_val) && (tag_val.len() % 2 == 0) {
sqlx::query(query)
sqlx::query("INSERT INTO tag (event_id, \"name\", value, value_hex) VALUES($1, $2, NULL, $3) \
ON CONFLICT (event_id, \"name\", value, value_hex) DO NOTHING")
.bind(&id_blob)
.bind(tag_name)
.bind(hex::decode(tag_val).ok())
.execute(&mut tx)
.await?;
.await
.unwrap();
} else {
sqlx::query(query)
sqlx::query("INSERT INTO tag (event_id, \"name\", value, value_hex) VALUES($1, $2, $3, NULL) \
ON CONFLICT (event_id, \"name\", value, value_hex) DO NOTHING")
.bind(&id_blob)
.bind(tag_name)
.bind(tag_val.as_bytes())
.execute(&mut tx)
.await?;
.await
.unwrap();
}
}
None => {}
@ -236,10 +281,10 @@ ON CONFLICT (id) DO NOTHING"#,
LEFT JOIN tag t ON e.id = t.event_id \
WHERE e.pub_key = $1 AND t.\"name\" = 'e' AND e.kind = 5 AND t.value = $2 LIMIT 1",
)
.bind(&pubkey_blob)
.bind(&id_blob)
.fetch_optional(&mut tx)
.await?;
.bind(&pubkey_blob)
.bind(&id_blob)
.fetch_optional(&mut tx)
.await?;
// check if the query returned a result, meaning we should
// hide the current event
@ -340,7 +385,10 @@ ON CONFLICT (id) DO NOTHING"#,
// check if this is still active; every 100 rows
if row_count % 100 == 0 && abandon_query_rx.try_recv().is_ok() {
debug!("query cancelled by client (cid: {}, sub: {:?})", client_id, sub.id);
debug!(
"query cancelled by client (cid: {}, sub: {:?})",
client_id, sub.id
);
return Ok(());
}
@ -356,7 +404,10 @@ ON CONFLICT (id) DO NOTHING"#,
if last_successful_send + abort_cutoff < Instant::now() {
// the queue has been full for too long, abort
info!("aborting database query due to slow client");
metrics.query_aborts.with_label_values(&["slowclient"]).inc();
metrics
.query_aborts
.with_label_values(&["slowclient"])
.inc();
return Ok(());
}
// give the queue a chance to clear before trying again
@ -404,7 +455,7 @@ ON CONFLICT (id) DO NOTHING"#,
}
async fn create_verification_record(&self, event_id: &str, name: &str) -> Result<()> {
let mut tx = self.conn.begin().await?;
let mut tx = self.conn_write.begin().await?;
sqlx::query("DELETE FROM user_verification WHERE \"name\" = $1")
.bind(name)
@ -427,12 +478,10 @@ ON CONFLICT (id) DO NOTHING"#,
let verify_time = now_jitter(600);
// update verification time and reset any failure count
sqlx::query(
"UPDATE user_verification SET verified_at = $1, fail_count = 0 WHERE id = $2",
)
sqlx::query("UPDATE user_verification SET verified_at = $1, fail_count = 0 WHERE id = $2")
.bind(Utc.timestamp_opt(verify_time as i64, 0).unwrap())
.bind(id as i64)
.execute(&self.conn)
.execute(&self.conn_write)
.await?;
info!("verification updated for {}", id);
@ -442,7 +491,7 @@ ON CONFLICT (id) DO NOTHING"#,
async fn fail_verification(&self, id: u64) -> Result<()> {
sqlx::query("UPDATE user_verification SET failed_at = now(), fail_count = fail_count + 1 WHERE id = $1")
.bind(id as i64)
.execute(&self.conn)
.execute(&self.conn_write)
.await?;
Ok(())
}
@ -450,7 +499,7 @@ ON CONFLICT (id) DO NOTHING"#,
async fn delete_verification(&self, id: u64) -> Result<()> {
sqlx::query("DELETE FROM user_verification WHERE id = $1")
.bind(id as i64)
.execute(&self.conn)
.execute(&self.conn_write)
.await?;
Ok(())
}
@ -499,6 +548,172 @@ ON CONFLICT (id) DO NOTHING"#,
.await?
.ok_or(error::Error::SqlxError(RowNotFound))
}
async fn create_account(&self, pub_key: &Keys) -> Result<bool> {
let pub_key = pub_key.public_key().to_string();
let mut tx = self.conn_write.begin().await?;
let result = sqlx::query("INSERT INTO account (pubkey, balance) VALUES ($1, 0);")
.bind(pub_key)
.execute(&mut tx)
.await;
let success = match result {
Ok(res) => {
tx.commit().await?;
res.rows_affected() == 1
}
Err(_err) => false,
};
Ok(success)
}
/// Admit account
async fn admit_account(&self, pub_key: &Keys, admission_cost: u64) -> Result<()> {
let pub_key = pub_key.public_key().to_string();
sqlx::query(
"UPDATE account SET is_admitted = TRUE, balance = balance - $1 WHERE pubkey = $2",
)
.bind(admission_cost as i64)
.bind(pub_key)
.execute(&self.conn_write)
.await?;
Ok(())
}
/// Gets if the account is admitted and balance
async fn get_account_balance(&self, pub_key: &Keys) -> Result<(bool, u64)> {
let pub_key = pub_key.public_key().to_string();
let query = r#"SELECT
is_admitted,
balance
FROM account
WHERE pubkey = $1
LIMIT 1"#;
let result = sqlx::query_as::<_, (bool, i64)>(query)
.bind(pub_key)
.fetch_optional(&self.conn_write)
.await?
.ok_or(error::Error::SqlxError(RowNotFound))?;
Ok((result.0, result.1 as u64))
}
/// Update account balance
async fn update_account_balance(
&self,
pub_key: &Keys,
positive: bool,
new_balance: u64,
) -> Result<()> {
let pub_key = pub_key.public_key().to_string();
match positive {
true => {
sqlx::query("UPDATE account SET balance = balance + $1 WHERE pubkey = $2")
.bind(new_balance as i64)
.bind(pub_key)
.execute(&self.conn_write)
.await?
}
false => {
sqlx::query("UPDATE account SET balance = balance - $1 WHERE pubkey = $2")
.bind(new_balance as i64)
.bind(pub_key)
.execute(&self.conn_write)
.await?
}
};
Ok(())
}
/// Create invoice record
async fn create_invoice_record(&self, pub_key: &Keys, invoice_info: InvoiceInfo) -> Result<()> {
let pub_key = pub_key.public_key().to_string();
let mut tx = self.conn_write.begin().await?;
sqlx::query(
"INSERT INTO invoice (pubkey, payment_hash, amount, status, description, created_at, invoice) VALUES ($1, $2, $3, $4, $5, now(), $6)",
)
.bind(pub_key)
.bind(invoice_info.payment_hash)
.bind(invoice_info.amount as i64)
.bind(invoice_info.status)
.bind(invoice_info.memo)
.bind(invoice_info.bolt11)
.execute(&mut tx)
.await.unwrap();
debug!("Invoice added");
tx.commit().await?;
Ok(())
}
/// Update invoice record
async fn update_invoice(&self, payment_hash: &str, status: InvoiceStatus) -> Result<String> {
debug!("Payment Hash: {}", payment_hash);
let query = "SELECT pubkey, status, amount FROM invoice WHERE payment_hash=$1;";
let (pubkey, prev_invoice_status, amount) =
sqlx::query_as::<_, (String, InvoiceStatus, i64)>(query)
.bind(payment_hash)
.fetch_optional(&self.conn_write)
.await?
.ok_or(error::Error::SqlxError(RowNotFound))?;
// If the invoice is paid update the confirmed at timestamp
let query = if status.eq(&InvoiceStatus::Paid) {
"UPDATE invoice SET status=$1, confirmed_at = now() WHERE payment_hash=$2;"
} else {
"UPDATE invoice SET status=$1 WHERE payment_hash=$2;"
};
sqlx::query(query)
.bind(&status)
.bind(payment_hash)
.execute(&self.conn_write)
.await?;
if prev_invoice_status.eq(&InvoiceStatus::Unpaid) && status.eq(&InvoiceStatus::Paid) {
sqlx::query("UPDATE account SET balance = balance + $1 WHERE pubkey = $2")
.bind(amount)
.bind(&pubkey)
.execute(&self.conn_write)
.await?;
}
Ok(pubkey)
}
/// Get the most recent invoice for a given pubkey
/// invoice must be unpaid and not expired
async fn get_unpaid_invoice(&self, pubkey: &Keys) -> Result<Option<InvoiceInfo>> {
let query = r#"
SELECT amount, payment_hash, description, invoice
FROM invoice
WHERE pubkey = $1
ORDER BY created_at DESC
LIMIT 1;
"#;
match sqlx::query_as::<_, (i64, String, String, String)>(query)
.bind(pubkey.public_key().to_string())
.fetch_optional(&self.conn_write)
.await
.unwrap()
{
Some((amount, payment_hash, description, invoice)) => Ok(Some(InvoiceInfo {
pubkey: pubkey.public_key().to_string(),
payment_hash,
bolt11: invoice,
amount: amount as u64,
status: InvoiceStatus::Unpaid,
memo: description,
confirmed_at: None,
})),
None => Ok(None),
}
}
}
/// Create a dynamic SQL query and params from a subscription filter.
@ -510,146 +725,69 @@ fn query_from_filter(f: &ReqFilter) -> Option<QueryBuilder<Postgres>> {
let mut query = QueryBuilder::new("SELECT e.\"content\", e.created_at FROM \"event\" e WHERE ");
// This tracks whether we need to push a prefix AND before adding another clause
let mut push_and = false;
// Query for "authors", allowing prefix matches
if let Some(auth_vec) = &f.authors {
// filter out non-hex values
let auth_vec: Vec<&String> = auth_vec.iter().filter(|a| is_hex(a)).collect();
if !auth_vec.is_empty() {
query.push("(");
// shortcut authors into "IN" query
let any_is_range = auth_vec.iter().any(|pk| pk.len() != 64);
if !any_is_range {
query.push("e.pub_key in (");
let mut pk_sep = query.separated(", ");
for pk in auth_vec.iter() {
pk_sep.push_bind(hex::decode(pk).ok());
}
query.push(") OR e.delegated_by in (");
let mut pk_delegated_sep = query.separated(", ");
for pk in auth_vec.iter() {
pk_delegated_sep.push_bind(hex::decode(pk).ok());
}
query.push(")");
push_and = true;
} else {
let mut range_authors = query.separated(" OR ");
for auth in auth_vec {
match hex_range(auth) {
Some(HexSearch::Exact(ex)) => {
range_authors
.push("(e.pub_key = ")
.push_bind_unseparated(ex.clone())
.push_unseparated(" OR e.delegated_by = ")
.push_bind_unseparated(ex)
.push_unseparated(")");
}
Some(HexSearch::Range(lower, upper)) => {
range_authors
.push("((e.pub_key > ")
.push_bind_unseparated(lower.clone())
.push_unseparated(" AND e.pub_key < ")
.push_bind_unseparated(upper.clone())
.push_unseparated(") OR (e.delegated_by > ")
.push_bind_unseparated(lower)
.push_unseparated(" AND e.delegated_by < ")
.push_bind_unseparated(upper)
.push_unseparated("))");
}
Some(HexSearch::LowerOnly(lower)) => {
range_authors
.push("(e.pub_key > ")
.push_bind_unseparated(lower.clone())
.push_unseparated(" OR e.delegated_by > ")
.push_bind_unseparated(lower)
.push_unseparated(")");
}
None => {
info!("Could not parse hex range from author {:?}", auth);
}
}
push_and = true;
}
}
query.push(")");
if auth_vec.is_empty() {
return None;
}
query.push("(e.pub_key in (");
let mut pk_sep = query.separated(", ");
for pk in auth_vec.iter() {
pk_sep.push_bind(hex::decode(pk).ok());
}
query.push(") OR e.delegated_by in (");
let mut pk_delegated_sep = query.separated(", ");
for pk in auth_vec.iter() {
pk_delegated_sep.push_bind(hex::decode(pk).ok());
}
push_and = true;
query.push("))");
}
// Query for Kind
if let Some(ks) = &f.kinds {
if !ks.is_empty() {
if push_and {
query.push(" AND ");
}
push_and = true;
query.push("e.kind in (");
let mut list_query = query.separated(", ");
for k in ks.iter() {
list_query.push_bind(*k as i64);
}
query.push(")");
if ks.is_empty() {
return None;
}
if push_and {
query.push(" AND ");
}
push_and = true;
query.push("e.kind in (");
let mut list_query = query.separated(", ");
for k in ks.iter() {
list_query.push_bind(*k as i64);
}
query.push(")");
}
// Query for event, allowing prefix matches
// Query for event ids
if let Some(id_vec) = &f.ids {
// filter out non-hex values
let id_vec: Vec<&String> = id_vec.iter().filter(|a| is_hex(a)).collect();
if !id_vec.is_empty() {
if push_and {
query.push(" AND (");
} else {
query.push("(");
}
push_and = true;
// shortcut ids into "IN" query
let any_is_range = id_vec.iter().any(|pk| pk.len() != 64);
if !any_is_range {
query.push("id in (");
let mut sep = query.separated(", ");
for id in id_vec.iter() {
sep.push_bind(hex::decode(id).ok());
}
query.push(")");
} else {
// take each author and convert to a hex search
let mut id_query = query.separated(" OR ");
for id in id_vec {
match hex_range(id) {
Some(HexSearch::Exact(ex)) => {
id_query
.push("(id = ")
.push_bind_unseparated(ex)
.push_unseparated(")");
}
Some(HexSearch::Range(lower, upper)) => {
id_query
.push("(id > ")
.push_bind_unseparated(lower)
.push_unseparated(" AND id < ")
.push_bind_unseparated(upper)
.push_unseparated(")");
}
Some(HexSearch::LowerOnly(lower)) => {
id_query
.push("(id > ")
.push_bind_unseparated(lower)
.push_unseparated(")");
}
None => {
info!("Could not parse hex range from id {:?}", id);
}
}
}
}
query.push(")");
if id_vec.is_empty() {
return None;
}
if push_and {
query.push(" AND (");
} else {
query.push("(");
}
push_and = true;
query.push("id in (");
let mut sep = query.separated(", ");
for id in id_vec.iter() {
sep.push_bind(hex::decode(id).ok());
}
query.push("))");
}
// Query for tags
@ -660,22 +798,46 @@ fn query_from_filter(f: &ReqFilter) -> Option<QueryBuilder<Postgres>> {
}
push_and = true;
let mut push_or = false;
query.push("e.id IN (SELECT ee.id FROM \"event\" ee LEFT JOIN tag t on ee.id = t.event_id WHERE ee.hidden != 1::bit(1) and ");
for (key, val) in map.iter() {
query.push("e.id IN (SELECT ee.id FROM \"event\" ee LEFT JOIN tag t on ee.id = t.event_id WHERE ee.hidden != 1::bit(1) and (t.\"name\" = ")
if val.is_empty() {
return None;
}
if push_or {
query.push(" OR ");
}
query
.push("(t.\"name\" = ")
.push_bind(key.to_string())
.push(" AND (value in (");
.push(" AND (");
// plain value match first
let mut tag_query = query.separated(", ");
for v in val.iter() {
if (v.len() % 2 != 0) && !is_lower_hex(v) {
let has_plain_values = val.iter().any(|v| (v.len() % 2 != 0 || !is_lower_hex(v)));
let has_hex_values = val.iter().any(|v| v.len() % 2 == 0 && is_lower_hex(v));
if has_plain_values {
query.push("value in (");
// plain value match first
let mut tag_query = query.separated(", ");
for v in val.iter().filter(|v| v.len() % 2 != 0 || !is_lower_hex(v)) {
tag_query.push_bind(v.as_bytes());
} else {
}
}
if has_plain_values && has_hex_values {
query.push(") OR ");
}
if has_hex_values {
query.push("value_hex in (");
// hex value match
let mut tag_query = query.separated(", ");
for v in val.iter().filter(|v| v.len() % 2 == 0 && is_lower_hex(v)) {
tag_query.push_bind(hex::decode(v).ok());
}
}
query.push("))))");
query.push(")))");
push_or = true;
}
query.push(")");
}
}
@ -686,7 +848,7 @@ fn query_from_filter(f: &ReqFilter) -> Option<QueryBuilder<Postgres>> {
}
push_and = true;
query
.push("e.created_at > ")
.push("e.created_at >= ")
.push_bind(Utc.timestamp_opt(f.since.unwrap() as i64, 0).unwrap());
}
@ -697,7 +859,7 @@ fn query_from_filter(f: &ReqFilter) -> Option<QueryBuilder<Postgres>> {
}
push_and = true;
query
.push("e.created_at < ")
.push("e.created_at <= ")
.push_bind(Utc.timestamp_opt(f.until.unwrap() as i64, 0).unwrap());
}
@ -707,6 +869,8 @@ fn query_from_filter(f: &ReqFilter) -> Option<QueryBuilder<Postgres>> {
} else {
query.push("e.hidden != 1::bit(1)");
}
// never display expired events
query.push(" AND (e.expires_at IS NULL OR e.expires_at > now())");
// Apply per-filter limit to this query.
// The use of a LIMIT implies a DESC order, to capture only the most recent events.
@ -722,15 +886,17 @@ fn query_from_filter(f: &ReqFilter) -> Option<QueryBuilder<Postgres>> {
impl FromRow<'_, PgRow> for VerificationRecord {
fn from_row(row: &'_ PgRow) -> std::result::Result<Self, Error> {
let name =
Nip05Name::try_from(row.get::<'_, &str, &str>("name")).or(Err(RowNotFound))?;
let name = Nip05Name::try_from(row.get::<'_, &str, &str>("name")).or(Err(RowNotFound))?;
Ok(VerificationRecord {
rowid: row.get::<'_, i64, &str>("id") as u64,
name,
address: hex::encode(row.get::<'_, Vec<u8>, &str>("pub_key")),
event: hex::encode(row.get::<'_, Vec<u8>, &str>("event_id")),
event_created: row.get::<'_, DateTime<Utc>, &str>("created_at").timestamp() as u64,
last_success: None,
last_success: match row.try_get::<'_, DateTime<Utc>, &str>("verified_at") {
Ok(x) => Some(x.timestamp() as u64),
_ => None,
},
last_failure: match row.try_get::<'_, DateTime<Utc>, &str>("failed_at") {
Ok(x) => Some(x.timestamp() as u64),
_ => None,
@ -739,3 +905,111 @@ impl FromRow<'_, PgRow> for VerificationRecord {
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::{HashMap, HashSet};
#[test]
fn test_query_gen_tag_value_hex() {
let filter = ReqFilter {
ids: None,
kinds: Some(vec![1000]),
since: None,
until: None,
authors: Some(vec![
"84de35e2584d2b144aae823c9ed0b0f3deda09648530b93d1a2a146d1dea9864".to_owned(),
]),
limit: None,
tags: Some(HashMap::from([(
'p',
HashSet::from([
"63fe6318dc58583cfe16810f86dd09e18bfd76aabc24a0081ce2856f330504ed".to_owned(),
]),
)])),
force_no_match: false,
};
let q = query_from_filter(&filter).unwrap();
assert_eq!(q.sql(), "SELECT e.\"content\", e.created_at FROM \"event\" e WHERE (e.pub_key in ($1) OR e.delegated_by in ($2)) AND e.kind in ($3) AND e.id IN (SELECT ee.id FROM \"event\" ee LEFT JOIN tag t on ee.id = t.event_id WHERE ee.hidden != 1::bit(1) and (t.\"name\" = $4 AND (value_hex in ($5)))) AND e.hidden != 1::bit(1) AND (e.expires_at IS NULL OR e.expires_at > now()) ORDER BY e.created_at ASC LIMIT 1000")
}
#[test]
fn test_query_gen_tag_value() {
let filter = ReqFilter {
ids: None,
kinds: Some(vec![1000]),
since: None,
until: None,
authors: Some(vec![
"84de35e2584d2b144aae823c9ed0b0f3deda09648530b93d1a2a146d1dea9864".to_owned(),
]),
limit: None,
tags: Some(HashMap::from([('d', HashSet::from(["test".to_owned()]))])),
force_no_match: false,
};
let q = query_from_filter(&filter).unwrap();
assert_eq!(q.sql(), "SELECT e.\"content\", e.created_at FROM \"event\" e WHERE (e.pub_key in ($1) OR e.delegated_by in ($2)) AND e.kind in ($3) AND e.id IN (SELECT ee.id FROM \"event\" ee LEFT JOIN tag t on ee.id = t.event_id WHERE ee.hidden != 1::bit(1) and (t.\"name\" = $4 AND (value in ($5)))) AND e.hidden != 1::bit(1) AND (e.expires_at IS NULL OR e.expires_at > now()) ORDER BY e.created_at ASC LIMIT 1000")
}
#[test]
fn test_query_gen_tag_value_and_value_hex() {
let filter = ReqFilter {
ids: None,
kinds: Some(vec![1000]),
since: None,
until: None,
authors: Some(vec![
"84de35e2584d2b144aae823c9ed0b0f3deda09648530b93d1a2a146d1dea9864".to_owned(),
]),
limit: None,
tags: Some(HashMap::from([(
'd',
HashSet::from([
"test".to_owned(),
"63fe6318dc58583cfe16810f86dd09e18bfd76aabc24a0081ce2856f330504ed".to_owned(),
]),
)])),
force_no_match: false,
};
let q = query_from_filter(&filter).unwrap();
assert_eq!(q.sql(), "SELECT e.\"content\", e.created_at FROM \"event\" e WHERE (e.pub_key in ($1) OR e.delegated_by in ($2)) AND e.kind in ($3) AND e.id IN (SELECT ee.id FROM \"event\" ee LEFT JOIN tag t on ee.id = t.event_id WHERE ee.hidden != 1::bit(1) and (t.\"name\" = $4 AND (value in ($5) OR value_hex in ($6)))) AND e.hidden != 1::bit(1) AND (e.expires_at IS NULL OR e.expires_at > now()) ORDER BY e.created_at ASC LIMIT 1000")
}
#[test]
fn test_query_multiple_tags() {
let filter = ReqFilter {
ids: None,
kinds: Some(vec![30_001]),
since: None,
until: None,
authors: None,
limit: None,
tags: Some(HashMap::from([
('d', HashSet::from(["follow".to_owned()])),
('t', HashSet::from(["siamstr".to_owned()])),
])),
force_no_match: false,
};
let q = query_from_filter(&filter).unwrap();
assert_eq!(q.sql(), "SELECT e.\"content\", e.created_at FROM \"event\" e WHERE e.kind in ($1) AND e.id IN (SELECT ee.id FROM \"event\" ee LEFT JOIN tag t on ee.id = t.event_id WHERE ee.hidden != 1::bit(1) and (t.\"name\" = $2 AND (value in ($3))) OR (t.\"name\" = $4 AND (value in ($5)))) AND e.hidden != 1::bit(1) AND (e.expires_at IS NULL OR e.expires_at > now()) ORDER BY e.created_at ASC LIMIT 1000")
}
#[test]
fn test_query_empty_tags() {
let filter = ReqFilter {
ids: None,
kinds: Some(vec![1, 6, 16, 30023, 1063, 6969]),
since: Some(1700697846),
until: None,
authors: None,
limit: None,
tags: Some(HashMap::from([('a', HashSet::new())])),
force_no_match: false,
};
assert!(query_from_filter(&filter).is_none());
}
}

View File

@ -35,6 +35,8 @@ pub async fn run_migrations(db: &PostgresPool) -> crate::error::Result<usize> {
m002::rebuild_tags(db).await?;
}
run_migration(m003::migration(), db).await;
run_migration(m004::migration(), db).await;
run_migration(m005::migration(), db).await;
Ok(current_version(db).await as usize)
}
@ -121,7 +123,7 @@ CREATE TABLE "tag" (
CREATE INDEX tag_event_id_idx ON tag USING btree (event_id, name);
CREATE INDEX tag_value_idx ON tag USING btree (value);
-- NIP-05 Verfication table
-- NIP-05 Verification table
CREATE TABLE "user_verification" (
id int8 NOT NULL GENERATED BY DEFAULT AS IDENTITY,
event_id bytea NOT NULL,
@ -203,7 +205,7 @@ CREATE INDEX tag_value_hex_idx ON tag USING btree (value_hex);
let event: Event = serde_json::from_str(&String::from_utf8(event_bytes).unwrap())?;
for t in event.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.get(0).unwrap();
let tagname = t.first().unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
@ -213,7 +215,7 @@ CREATE INDEX tag_value_hex_idx ON tag USING btree (value_hex);
// insert as BLOB if we can restore it losslessly.
// this means it needs to be even length and lowercase.
if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
let q = "INSERT INTO tag (event_id, \"name\", value_hex) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING;";
let q = "INSERT INTO tag (event_id, \"name\", value, value_hex) VALUES ($1, $2, NULL, $3) ON CONFLICT DO NOTHING;";
sqlx::query(q)
.bind(&event_id)
.bind(tagname)
@ -221,7 +223,7 @@ CREATE INDEX tag_value_hex_idx ON tag USING btree (value_hex);
.execute(&mut update_tx)
.await?;
} else {
let q = "INSERT INTO tag (event_id, \"name\", value) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING;";
let q = "INSERT INTO tag (event_id, \"name\", value, value_hex) VALUES ($1, $2, $3, NULL) ON CONFLICT DO NOTHING;";
sqlx::query(q)
.bind(&event_id)
.bind(tagname)
@ -250,7 +252,67 @@ mod m003 {
sql: vec![
r#"
-- Add unique constraint on tag
ALTER TABLE tag ADD CONSTRAINT unique_constraint_name UNIQUE (event_id, "name", value);
ALTER TABLE tag ADD CONSTRAINT unique_constraint_name UNIQUE (event_id, "name", value, value_hex);
"#,
],
}
}
}
mod m004 {
use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
pub const VERSION: i64 = 4;
pub fn migration() -> impl Migration {
SimpleSqlMigration {
serial_number: VERSION,
sql: vec![
r#"
-- Add expiration time for events
ALTER TABLE event ADD COLUMN expires_at timestamp(0) with time zone;
-- Index expiration time
CREATE INDEX event_expires_at_idx ON "event" (expires_at);
"#,
],
}
}
}
mod m005 {
use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
pub const VERSION: i64 = 5;
pub fn migration() -> impl Migration {
SimpleSqlMigration {
serial_number: VERSION,
sql: vec![
r#"
-- Create account table
CREATE TABLE "account" (
pubkey varchar NOT NULL,
is_admitted BOOLEAN NOT NULL DEFAULT FALSE,
balance BIGINT NOT NULL DEFAULT 0,
tos_accepted_at TIMESTAMP,
CONSTRAINT account_pkey PRIMARY KEY (pubkey)
);
CREATE TYPE status AS ENUM ('Paid', 'Unpaid', 'Expired');
CREATE TABLE "invoice" (
payment_hash varchar NOT NULL,
pubkey varchar NOT NULL,
invoice varchar NOT NULL,
amount BIGINT NOT NULL,
status status NOT NULL DEFAULT 'Unpaid',
description varchar,
created_at timestamp,
confirmed_at timestamp,
CONSTRAINT invoice_payment_hash PRIMARY KEY (payment_hash),
CONSTRAINT invoice_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES account (pubkey) ON DELETE CASCADE
);
"#,
],
}

View File

@ -1,34 +1,34 @@
//! Event persistence and querying
//use crate::config::SETTINGS;
use crate::config::Settings;
use crate::error::Result;
use crate::db::QueryResult;
use crate::error::{Error::SqlError, Result};
use crate::event::{single_char_tagname, Event};
use crate::hexrange::hex_range;
use crate::hexrange::HexSearch;
use crate::repo::sqlite_migration::{STARTUP_SQL,upgrade_db};
use crate::utils::{is_hex};
use crate::nip05::{Nip05Name, VerificationRecord};
use crate::subscription::{ReqFilter, Subscription};
use crate::payment::{InvoiceInfo, InvoiceStatus};
use crate::repo::sqlite_migration::{upgrade_db, STARTUP_SQL};
use crate::server::NostrMetrics;
use crate::subscription::{ReqFilter, Subscription};
use crate::utils::{is_hex, unix_time};
use async_trait::async_trait;
use hex;
use r2d2;
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::params;
use rusqlite::types::ToSql;
use rusqlite::OpenFlags;
use tokio::sync::{Mutex, MutexGuard, Semaphore};
use std::fmt::Write as _;
use std::path::Path;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use std::time::Instant;
use tokio::sync::{Mutex, MutexGuard, Semaphore};
use tokio::task;
use tracing::{debug, info, trace, warn};
use async_trait::async_trait;
use crate::db::QueryResult;
use crate::repo::{now_jitter, NostrRepo};
use nostr::key::Keys;
pub type SqlitePool = r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>;
pub type PooledConnection = r2d2::PooledConnection<r2d2_sqlite::SqliteConnectionManager>;
@ -54,12 +54,13 @@ pub struct SqliteRepo {
impl SqliteRepo {
// build all the pools needed
#[must_use] pub fn new(settings: &Settings, metrics: NostrMetrics) -> SqliteRepo {
#[must_use]
pub fn new(settings: &Settings, metrics: NostrMetrics) -> SqliteRepo {
let write_pool = build_pool(
"writer",
settings,
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
1,
0,
2,
false,
);
@ -67,7 +68,7 @@ impl SqliteRepo {
"maintenance",
settings,
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
1,
0,
2,
true,
);
@ -110,7 +111,8 @@ impl SqliteRepo {
// get relevant fields from event and convert to blobs.
let id_blob = hex::decode(&e.id).ok();
let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok();
let delegator_blob: Option<Vec<u8>> = e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
let delegator_blob: Option<Vec<u8>> =
e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
let event_str = serde_json::to_string(&e).ok();
// check for replaceable events that would hide this one; we won't even attempt to insert these.
if e.is_replaceable() {
@ -130,13 +132,13 @@ impl SqliteRepo {
// the same author/kind/tag value exist, and we can ignore
// this event.
if repl_count.ok().is_some() {
return Ok(0)
return Ok(0);
}
}
// ignore if the event hash is a duplicate.
let mut ins_count = tx.execute(
"INSERT OR IGNORE INTO event (event_hash, created_at, kind, author, delegated_by, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, ?6, strftime('%s','now'), FALSE);",
params![id_blob, e.created_at, e.kind, pubkey_blob, delegator_blob, event_str]
"INSERT OR IGNORE INTO event (event_hash, created_at, expires_at, kind, author, delegated_by, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, strftime('%s','now'), FALSE);",
params![id_blob, e.created_at, e.expiration(), e.kind, pubkey_blob, delegator_blob, event_str]
)? as u64;
if ins_count == 0 {
// if the event was a duplicate, no need to insert event or
@ -187,7 +189,7 @@ impl SqliteRepo {
// if this event is parameterized replaceable, remove other events.
if let Some(d_tag) = e.distinct_param() {
let update_count = tx.execute(
"DELETE FROM event WHERE kind=? AND author=? AND id IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=? AND e.author=? AND t.name='d' AND t.value=? ORDER BY created_at DESC LIMIT -1 OFFSET 1);",
"DELETE FROM event WHERE kind=? AND author=? AND id IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=? AND e.author=? AND t.name='d' AND t.value=? ORDER BY t.created_at DESC LIMIT -1 OFFSET 1);",
params![e.kind, pubkey_blob, e.kind, pubkey_blob, d_tag])?;
if update_count > 0 {
info!(
@ -249,21 +251,32 @@ impl SqliteRepo {
#[async_trait]
impl NostrRepo for SqliteRepo {
async fn start(&self) -> Result<()> {
db_checkpoint_task(self.maint_pool.clone(), Duration::from_secs(60), self.checkpoint_in_progress.clone()).await
db_checkpoint_task(
self.maint_pool.clone(),
Duration::from_secs(60),
self.write_in_progress.clone(),
self.checkpoint_in_progress.clone(),
)
.await?;
cleanup_expired(
self.maint_pool.clone(),
Duration::from_secs(600),
self.write_in_progress.clone(),
)
.await
}
async fn migrate_up(&self) -> Result<usize> {
let _write_guard = self.write_in_progress.lock().await;
let mut conn = self.write_pool.get()?;
task::spawn_blocking(move || {
upgrade_db(&mut conn)
}).await?
task::spawn_blocking(move || upgrade_db(&mut conn)).await?
}
/// Persist event to database
async fn write_event(&self, e: &Event) -> Result<u64> {
let start = Instant::now();
let max_write_attempts = 10;
let mut attempts = 0;
let _write_guard = self.write_in_progress.lock().await;
// spawn a blocking thread
//let mut conn = self.write_pool.get()?;
@ -271,8 +284,32 @@ impl NostrRepo for SqliteRepo {
let e = e.clone();
let event_count = task::spawn_blocking(move || {
let mut conn = pool.get()?;
SqliteRepo::persist_event(&mut conn, &e)
}).await?;
// this could fail because the database was busy; try
// multiple times before giving up.
loop {
attempts += 1;
let wr = SqliteRepo::persist_event(&mut conn, &e);
match wr {
Err(SqlError(rusqlite::Error::SqliteFailure(e, _))) => {
// this basically means that NIP-05 or another
// writer was using the database between us
// reading and promoting the connection to a
// write lock.
info!(
"event write failed, DB locked (attempt: {}); sqlite err: {}",
attempts, e.extended_code
);
}
_ => {
return wr;
}
}
if attempts >= max_write_attempts {
return wr;
}
}
})
.await?;
self.metrics
.write_events
.observe(start.elapsed().as_secs_f64());
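The loop above retries persist_event whenever SQLite reports the database busy, giving up after ten attempts. A minimal standalone sketch of the same pattern, with a hypothetical `persist` closure standing in for SqliteRepo::persist_event:

use rusqlite::{Connection, Error};

fn write_with_retry<F>(conn: &mut Connection, mut persist: F, max_attempts: u32) -> rusqlite::Result<u64>
where
    F: FnMut(&mut Connection) -> rusqlite::Result<u64>,
{
    let mut attempts = 0;
    loop {
        attempts += 1;
        match persist(conn) {
            // SQLITE_BUSY (and similar) surfaces as SqliteFailure; retry those
            Err(Error::SqliteFailure(err, _msg)) if attempts < max_attempts => {
                eprintln!("write failed (attempt {attempts}): {}", err.extended_code);
            }
            other => return other,
        }
    }
}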
@ -297,9 +334,14 @@ impl NostrRepo for SqliteRepo {
// thread pool waiting for queries to finish under high load.
// Instead, don't bother spawning threads when they will just
// block on a database connection.
let sem = self.reader_threads_ready.clone().acquire_owned().await.unwrap();
let self=self.clone();
let metrics=self.metrics.clone();
let sem = self
.reader_threads_ready
.clone()
.acquire_owned()
.await
.unwrap();
let self = self.clone();
let metrics = self.metrics.clone();
task::spawn_blocking(move || {
{
// if we are waiting on a checkpoint, stop until it is complete
@ -324,7 +366,10 @@ impl NostrRepo for SqliteRepo {
}
// check before getting a DB connection if the client still wants the results
if abandon_query_rx.try_recv().is_ok() {
debug!("query cancelled by client (before execution) (cid: {}, sub: {:?})", client_id, sub.id);
debug!(
"query cancelled by client (before execution) (cid: {}, sub: {:?})",
client_id, sub.id
);
return Ok(());
}
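The semaphore above caps how many blocking reader threads exist at once, so surplus queries wait for a permit instead of piling up in the blocking thread pool. A minimal sketch of the pattern with tokio (the slot count is illustrative):

use std::sync::Arc;
use tokio::sync::Semaphore;

async fn bounded_read(readers: Arc<Semaphore>) {
    // waits here until a reader slot is free
    let permit = readers.clone().acquire_owned().await.unwrap();
    tokio::task::spawn_blocking(move || {
        // ... run the blocking DB query while holding the permit ...
        drop(permit); // releases the slot for the next query
    });
}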
@ -337,7 +382,9 @@ impl NostrRepo for SqliteRepo {
if let Ok(mut conn) = self.read_pool.get() {
{
let pool_state = self.read_pool.state();
metrics.db_connections.set((pool_state.connections - pool_state.idle_connections).into());
metrics
.db_connections
.set((pool_state.connections - pool_state.idle_connections).into());
}
for filter in sub.filters.iter() {
let filter_start = Instant::now();
@ -354,7 +401,7 @@ impl NostrRepo for SqliteRepo {
let mut last_successful_send = Instant::now();
// execute the query.
// make the actual SQL query (with parameters inserted) available
conn.trace(Some(|x| {trace!("SQL trace: {:?}", x)}));
conn.trace(Some(|x| trace!("SQL trace: {:?}", x)));
let mut stmt = conn.prepare_cached(&q)?;
let mut event_rows = stmt.query(rusqlite::params_from_iter(p))?;
@ -372,7 +419,10 @@ impl NostrRepo for SqliteRepo {
if slow_first_event && client_id.starts_with('0') {
debug!(
"filter first result in {:?} (slow): {} (cid: {}, sub: {:?})",
first_event_elapsed, serde_json::to_string(&filter)?, client_id, sub.id
first_event_elapsed,
serde_json::to_string(&filter)?,
client_id,
sub.id
);
}
first_result = false;
@ -382,8 +432,14 @@ impl NostrRepo for SqliteRepo {
{
if self.checkpoint_in_progress.try_lock().is_err() {
// lock was held, abort this query
debug!("query aborted due to checkpoint (cid: {}, sub: {:?})", client_id, sub.id);
metrics.query_aborts.with_label_values(&["checkpoint"]).inc();
debug!(
"query aborted due to checkpoint (cid: {}, sub: {:?})",
client_id, sub.id
);
metrics
.query_aborts
.with_label_values(&["checkpoint"])
.inc();
return Ok(());
}
}
@ -391,7 +447,10 @@ impl NostrRepo for SqliteRepo {
// check if this is still active; every 100 rows
if row_count % 100 == 0 && abandon_query_rx.try_recv().is_ok() {
debug!("query cancelled by client (cid: {}, sub: {:?})", client_id, sub.id);
debug!(
"query cancelled by client (cid: {}, sub: {:?})",
client_id, sub.id
);
return Ok(());
}
row_count += 1;
@ -407,19 +466,31 @@ impl NostrRepo for SqliteRepo {
// the queue has been full for too long, abort
info!("aborting database query due to slow client (cid: {}, sub: {:?})",
client_id, sub.id);
metrics.query_aborts.with_label_values(&["slowclient"]).inc();
metrics
.query_aborts
.with_label_values(&["slowclient"])
.inc();
let ok: Result<()> = Ok(());
return ok;
}
// check if a checkpoint is trying to run, and abort
if self.checkpoint_in_progress.try_lock().is_err() {
// lock was held, abort this query
debug!("query aborted due to checkpoint (cid: {}, sub: {:?})", client_id, sub.id);
metrics.query_aborts.with_label_values(&["checkpoint"]).inc();
debug!(
"query aborted due to checkpoint (cid: {}, sub: {:?})",
client_id, sub.id
);
metrics
.query_aborts
.with_label_values(&["checkpoint"])
.inc();
return Ok(());
}
// give the queue a chance to clear before trying again
debug!("query thread sleeping due to full query_tx (cid: {}, sub: {:?})", client_id, sub.id);
debug!(
"query thread sleeping due to full query_tx (cid: {}, sub: {:?})",
client_id, sub.id
);
thread::sleep(Duration::from_millis(500));
}
// TODO: we could use try_send, but we'd have to juggle
@ -440,10 +511,12 @@ impl NostrRepo for SqliteRepo {
if filter_start.elapsed() > slow_cutoff && client_id.starts_with('0') {
debug!(
"query filter req (slow): {} (cid: {}, sub: {:?}, filter: {})",
serde_json::to_string(&filter)?, client_id, sub.id, filter_count
serde_json::to_string(&filter)?,
client_id,
sub.id,
filter_count
);
}
}
} else {
warn!("Could not get a database connection for querying");
@ -479,7 +552,8 @@ impl NostrRepo for SqliteRepo {
let start = Instant::now();
conn.execute_batch("PRAGMA optimize;").ok();
info!("optimize ran in {:?}", start.elapsed());
}).await?;
})
.await?;
Ok(())
}
@ -488,6 +562,7 @@ impl NostrRepo for SqliteRepo {
let e = hex::decode(event_id).ok();
let n = name.to_owned();
let mut conn = self.write_pool.get()?;
let _write_guard = self.write_in_progress.lock().await;
tokio::task::spawn_blocking(move || {
let tx = conn.transaction()?;
{
@ -515,6 +590,7 @@ impl NostrRepo for SqliteRepo {
/// Update verification timestamp
async fn update_verification_timestamp(&self, id: u64) -> Result<()> {
let mut conn = self.write_pool.get()?;
let _write_guard = self.write_in_progress.lock().await;
tokio::task::spawn_blocking(move || {
// add some jitter to the verification to prevent everything from stacking up together.
let verif_time = now_jitter(600);
@ -530,13 +606,13 @@ impl NostrRepo for SqliteRepo {
let ok: Result<()> = Ok(());
ok
})
.await?
.await?
}
/// Update verification record as failed
async fn fail_verification(&self, id: u64) -> Result<()> {
let mut conn = self.write_pool.get()?;
let _write_guard = self.write_in_progress.lock().await;
tokio::task::spawn_blocking(move || {
// add some jitter to the verification to prevent everything from stacking up together.
let fail_time = now_jitter(600);
@ -556,6 +632,7 @@ impl NostrRepo for SqliteRepo {
/// Delete verification record
async fn delete_verification(&self, id: u64) -> Result<()> {
let mut conn = self.write_pool.get()?;
let _write_guard = self.write_in_progress.lock().await;
tokio::task::spawn_blocking(move || {
let tx = conn.transaction()?;
{
@ -567,7 +644,7 @@ impl NostrRepo for SqliteRepo {
let ok: Result<()> = Ok(());
ok
})
.await?
.await?
}
/// Get the latest verification record for a given pubkey.
@ -645,6 +722,209 @@ impl NostrRepo for SqliteRepo {
Ok(vr)
}).await?
}
/// Create account
async fn create_account(&self, pub_key: &Keys) -> Result<bool> {
let pub_key = pub_key.public_key().to_string();
let mut conn = self.write_pool.get()?;
let ins_count = tokio::task::spawn_blocking(move || {
let tx = conn.transaction()?;
let ins_count: u64;
{
// Ignore if user is already in db
let query = "INSERT OR IGNORE INTO account (pubkey, is_admitted, balance) VALUES (?1, ?2, ?3);";
let mut stmt = tx.prepare(query)?;
ins_count = stmt.execute(params![&pub_key, false, 0])? as u64;
}
tx.commit()?;
let ok: Result<u64> = Ok(ins_count);
ok
}).await??;
if ins_count != 1 {
return Ok(false);
}
Ok(true)
}
/// Admit account
async fn admit_account(&self, pub_key: &Keys, admission_cost: u64) -> Result<()> {
let pub_key = pub_key.public_key().to_string();
let mut conn = self.write_pool.get()?;
let pub_key = pub_key.to_owned();
tokio::task::spawn_blocking(move || {
let tx = conn.transaction()?;
{
let query = "UPDATE account SET is_admitted = TRUE, tos_accepted_at = strftime('%s','now'), balance = balance - ?1 WHERE pubkey=?2;";
let mut stmt = tx.prepare(query)?;
stmt.execute(params![admission_cost, pub_key])?;
}
tx.commit()?;
let ok: Result<()> = Ok(());
ok
})
.await?
}
/// Gets if the account is admitted and balance
async fn get_account_balance(&self, pub_key: &Keys) -> Result<(bool, u64)> {
let pub_key = pub_key.public_key().to_string();
let mut conn = self.write_pool.get()?;
let pub_key = pub_key.to_owned();
tokio::task::spawn_blocking(move || {
let tx = conn.transaction()?;
let query = "SELECT is_admitted, balance FROM account WHERE pubkey = ?1;";
let mut stmt = tx.prepare_cached(query)?;
let fields = stmt.query_row(params![pub_key], |r| {
let is_admitted: bool = r.get(0)?;
let balance: u64 = r.get(1)?;
// create a tuple since we can't throw non-rusqlite errors in this closure
Ok((is_admitted, balance))
})?;
Ok(fields)
})
.await?
}
/// Update account balance
async fn update_account_balance(
&self,
pub_key: &Keys,
positive: bool,
new_balance: u64,
) -> Result<()> {
let pub_key = pub_key.public_key().to_string();
let mut conn = self.write_pool.get()?;
tokio::task::spawn_blocking(move || {
let tx = conn.transaction()?;
{
let query = if positive {
"UPDATE account SET balance=balance + ?1 WHERE pubkey=?2"
} else {
"UPDATE account SET balance=balance - ?1 WHERE pubkey=?2"
};
let mut stmt = tx.prepare(query)?;
stmt.execute(params![new_balance, pub_key])?;
}
tx.commit()?;
let ok: Result<()> = Ok(());
ok
})
.await?
}
/// Create invoice record
async fn create_invoice_record(&self, pub_key: &Keys, invoice_info: InvoiceInfo) -> Result<()> {
let pub_key = pub_key.public_key().to_string();
let pub_key = pub_key.to_owned();
let mut conn = self.write_pool.get()?;
tokio::task::spawn_blocking(move || {
let tx = conn.transaction()?;
{
let query = "INSERT INTO invoice (pubkey, payment_hash, amount, status, description, created_at, invoice) VALUES (?1, ?2, ?3, ?4, ?5, strftime('%s','now'), ?6);";
let mut stmt = tx.prepare(query)?;
stmt.execute(params![&pub_key, invoice_info.payment_hash, invoice_info.amount, invoice_info.status.to_string(), invoice_info.memo, invoice_info.bolt11])?;
}
tx.commit()?;
let ok: Result<()> = Ok(());
ok
}).await??;
Ok(())
}
/// Update invoice record
async fn update_invoice(&self, payment_hash: &str, status: InvoiceStatus) -> Result<String> {
let mut conn = self.write_pool.get()?;
let payment_hash = payment_hash.to_owned();
tokio::task::spawn_blocking(move || {
let tx = conn.transaction()?;
let pubkey: String;
{
// Get required invoice info for given payment hash
let query = "SELECT pubkey, status, amount FROM invoice WHERE payment_hash=?1;";
let mut stmt = tx.prepare(query)?;
let (pub_key, prev_status, amount) = stmt.query_row(params![payment_hash], |r| {
let pub_key: String = r.get(0)?;
let status: String = r.get(1)?;
let amount: u64 = r.get(2)?;
Ok((pub_key, status, amount))
})?;
// If the invoice is paid, update the confirmed_at timestamp
let query = if status.eq(&InvoiceStatus::Paid) {
"UPDATE invoice SET status=?1, confirmed_at = strftime('%s', 'now') WHERE payment_hash=?2;"
} else {
"UPDATE invoice SET status=?1 WHERE payment_hash=?2;"
};
let mut stmt = tx.prepare(query)?;
stmt.execute(params![status.to_string(), payment_hash])?;
// Increase account balance by given invoice amount
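// (only on the Unpaid -> Paid transition, so a repeated status
// update cannot credit the balance twice)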
if prev_status == "Unpaid" && status.eq(&InvoiceStatus::Paid) {
let query =
"UPDATE account SET balance = balance + ?1 WHERE pubkey = ?2;";
let mut stmt = tx.prepare(query)?;
stmt.execute(params![amount, pub_key])?;
}
pubkey = pub_key;
}
tx.commit()?;
let ok: Result<String> = Ok(pubkey);
ok
})
.await?
}
/// Get the most recent invoice for a given pubkey
/// invoice must be unpaid and not expired
async fn get_unpaid_invoice(&self, pubkey: &Keys) -> Result<Option<InvoiceInfo>> {
let mut conn = self.write_pool.get()?;
let pubkey = pubkey.to_owned();
let pubkey_str = pubkey.clone().public_key().to_string();
let (payment_hash, invoice, amount, description) = tokio::task::spawn_blocking(move || {
let tx = conn.transaction()?;
let query = r#"
SELECT amount, payment_hash, description, invoice
FROM invoice
WHERE pubkey = ?1 AND status = 'Unpaid'
ORDER BY created_at DESC
LIMIT 1;
"#;
let mut stmt = tx.prepare(query).unwrap();
stmt.query_row(params![&pubkey_str], |r| {
let amount: u64 = r.get(0)?;
let payment_hash: String = r.get(1)?;
let description: String = r.get(2)?;
let invoice: String = r.get(3)?;
Ok((payment_hash, invoice, amount, description))
})
})
.await??;
Ok(Some(InvoiceInfo {
pubkey: pubkey.public_key().to_string(),
payment_hash,
bolt11: invoice,
amount,
status: InvoiceStatus::Unpaid,
memo: description,
confirmed_at: None,
}))
}
}
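Taken together, the new account and invoice methods support a pay-to-relay admission flow. A hedged sketch of how a caller might chain them (a free function over the trait; the relay's actual payment module may wire this differently):

async fn admit_after_payment<R: NostrRepo>(
    repo: &R,
    keys: &Keys,
    invoice: InvoiceInfo,
    admission_cost: u64,
) -> Result<()> {
    // no-op if the account already exists (INSERT OR IGNORE)
    repo.create_account(keys).await?;
    let payment_hash = invoice.payment_hash.clone();
    repo.create_invoice_record(keys, invoice).await?;
    // ...later, once the payment processor reports the invoice settled:
    let _payer = repo.update_invoice(&payment_hash, InvoiceStatus::Paid).await?;
    // deduct the admission cost and mark the account admitted
    repo.admit_account(keys, admission_cost).await?;
    Ok(())
}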
/// Decide if there is an index that should be used explicitly
@ -655,14 +935,15 @@ fn override_index(f: &ReqFilter) -> Option<String> {
// queries for multiple kinds default to kind_index, which is
// significantly slower than kind_created_at_index.
if let Some(ks) = &f.kinds {
if f.ids.is_none() &&
ks.len() > 1 &&
f.since.is_none() &&
f.until.is_none() &&
f.tags.is_none() &&
f.authors.is_none() {
return Some("kind_created_at_index".into());
}
if f.ids.is_none()
&& ks.len() > 1
&& f.since.is_none()
&& f.until.is_none()
&& f.tags.is_none()
&& f.authors.is_none()
{
return Some("kind_created_at_index".into());
}
}
// if there is an author, it is much better to force the authors index.
if f.authors.is_some() {
@ -695,9 +976,11 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<Stri
return (empty_query, empty_params, None);
}
// check if the index needs to be overriden
// check if the index needs to be overridden
let idx_name = override_index(f);
let idx_stmt = idx_name.as_ref().map_or_else(|| "".to_owned(), |i| format!("INDEXED BY {i}"));
let idx_stmt = idx_name
.as_ref()
.map_or_else(|| "".to_owned(), |i| format!("INDEXED BY {i}"));
let mut query = format!("SELECT e.content FROM event e {idx_stmt}");
// query parameters for SQLite
let mut params: Vec<Box<dyn ToSql>> = vec![];
@ -709,26 +992,9 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<Stri
// match each author pubkey exactly (hex prefix searches were removed)
let mut auth_searches: Vec<String> = vec![];
for auth in authvec {
match hex_range(auth) {
Some(HexSearch::Exact(ex)) => {
auth_searches.push("author=?".to_owned());
params.push(Box::new(ex));
}
Some(HexSearch::Range(lower, upper)) => {
auth_searches.push(
"(author>? AND author<?)".to_owned(),
);
params.push(Box::new(lower));
params.push(Box::new(upper));
}
Some(HexSearch::LowerOnly(lower)) => {
auth_searches.push("author>?".to_owned());
params.push(Box::new(lower));
}
None => {
info!("Could not parse hex range from author {:?}", auth);
}
}
auth_searches.push("author=?".to_owned());
let auth_bin = hex::decode(auth).ok();
params.push(Box::new(auth_bin));
}
if !authvec.is_empty() {
let auth_clause = format!("({})", auth_searches.join(" OR "));
@ -749,24 +1015,8 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<Stri
// match each event id exactly (hex prefix searches were removed)
let mut id_searches: Vec<String> = vec![];
for id in idvec {
match hex_range(id) {
Some(HexSearch::Exact(ex)) => {
id_searches.push("event_hash=?".to_owned());
params.push(Box::new(ex));
}
Some(HexSearch::Range(lower, upper)) => {
id_searches.push("(event_hash>? AND event_hash<?)".to_owned());
params.push(Box::new(lower));
params.push(Box::new(upper));
}
Some(HexSearch::LowerOnly(lower)) => {
id_searches.push("event_hash>?".to_owned());
params.push(Box::new(lower));
}
None => {
info!("Could not parse hex range from id {:?}", id);
}
}
id_searches.push("event_hash=?".to_owned());
params.push(Box::new(id.clone()));
}
if idvec.is_empty() {
// if the ids list was empty, we should never return
@ -789,25 +1039,24 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<Stri
// find evidence of the target tag name/value existing for this event.
// Query for Kind/Since/Until additionally, to reduce the number of tags that come back.
let kind_clause;
let since_clause;
let until_clause;
if let Some(ks) = &f.kinds {
// kind is number, no escaping needed
let str_kinds: Vec<String> = ks.iter().map(std::string::ToString::to_string).collect();
let str_kinds: Vec<String> =
ks.iter().map(std::string::ToString::to_string).collect();
kind_clause = format!("AND kind IN ({})", str_kinds.join(", "));
} else {
kind_clause = format!("");
kind_clause = String::new();
};
if f.since.is_some() {
since_clause = format!("AND created_at > {}", f.since.unwrap());
let since_clause = if f.since.is_some() {
format!("AND created_at >= {}", f.since.unwrap())
} else {
since_clause = format!("");
String::new()
};
// Query for timestamp
if f.until.is_some() {
until_clause = format!("AND created_at < {}", f.until.unwrap());
let until_clause = if f.until.is_some() {
format!("AND created_at <= {}", f.until.unwrap())
} else {
until_clause = format!("");
String::new()
};
let tag_clause = format!(
@ -823,16 +1072,19 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<Stri
}
// Query for timestamp
if f.since.is_some() {
let created_clause = format!("created_at > {}", f.since.unwrap());
let created_clause = format!("created_at >= {}", f.since.unwrap());
filter_components.push(created_clause);
}
// Query for timestamp
if f.until.is_some() {
let until_clause = format!("created_at < {}", f.until.unwrap());
let until_clause = format!("created_at <= {}", f.until.unwrap());
filter_components.push(until_clause);
}
// never display hidden events
query.push_str(" WHERE hidden!=TRUE");
// never display expired events
filter_components.push("(expires_at IS NULL OR expires_at > ?)".to_string());
params.push(Box::new(unix_time()));
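// e.g. a filter with no other components now compiles to roughly:
//   SELECT e.content FROM event e WHERE hidden!=TRUE AND (expires_at IS NULL OR expires_at > ?)
// with the parameter bound to the current unix time.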
// build filter component conditions
if !filter_components.is_empty() {
query.push_str(" AND ");
@ -871,7 +1123,7 @@ fn _query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>, Vec<Stri
.map(|s| format!("SELECT distinct content, created_at FROM ({s})"))
.collect();
let query: String = subqueries_selects.join(" UNION ");
(query, params,indexes)
(query, params, indexes)
}
/// Build a database connection pool.
@ -900,7 +1152,7 @@ pub fn build_pool(
}
}
let manager = if settings.database.in_memory {
SqliteConnectionManager::memory()
SqliteConnectionManager::file("file::memory:?cache=shared")
.with_flags(flags)
.with_init(|c| c.execute_batch(STARTUP_SQL))
} else {
@ -912,9 +1164,15 @@ pub fn build_pool(
.test_on_check_out(true) // no noticeable performance hit
.min_idle(Some(min_size))
.max_size(max_size)
.idle_timeout(Some(Duration::from_secs(10)))
.max_lifetime(Some(Duration::from_secs(30)))
.build(manager)
.unwrap();
// retrieve a connection to ensure the startup statements run immediately
{
let _ = pool.get();
}
info!(
"Built a connection pool {:?} (min={}, max={})",
name, min_size, max_size
@ -922,14 +1180,71 @@ pub fn build_pool(
pool
}
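The in-memory manager now opens `file::memory:?cache=shared` rather than SqliteConnectionManager::memory(). With a plain `:memory:` database, every pooled connection gets its own private, empty database; the shared-cache URI makes all connections in the pool address the same one. A minimal illustration with rusqlite:

use rusqlite::{Connection, OpenFlags};

fn shared_memory_demo() -> rusqlite::Result<()> {
    let flags = OpenFlags::SQLITE_OPEN_READ_WRITE
        | OpenFlags::SQLITE_OPEN_CREATE
        | OpenFlags::SQLITE_OPEN_URI;
    // both connections see the same database thanks to cache=shared
    let a = Connection::open_with_flags("file::memory:?cache=shared", flags)?;
    let b = Connection::open_with_flags("file::memory:?cache=shared", flags)?;
    a.execute_batch("CREATE TABLE t(x INTEGER); INSERT INTO t VALUES (1);")?;
    let n: i64 = b.query_row("SELECT count(*) FROM t", [], |r| r.get(0))?;
    assert_eq!(n, 1);
    Ok(())
}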
/// Cleanup expired events on a regular basis
async fn cleanup_expired(
pool: SqlitePool,
frequency: Duration,
write_in_progress: Arc<Mutex<u64>>,
) -> Result<()> {
tokio::task::spawn(async move {
loop {
tokio::select! {
_ = tokio::time::sleep(frequency) => {
if let Ok(mut conn) = pool.get() {
let mut _guard:Option<MutexGuard<u64>> = None;
// take a write lock to prevent event writes
// from proceeding while we are deleting
// events. This isn't necessary, but
// minimizes the chances of forcing event
// persistence to be retried.
_guard = Some(write_in_progress.lock().await);
let start = Instant::now();
let exp_res = tokio::task::spawn_blocking(move || {
delete_expired(&mut conn)
}).await;
match exp_res {
Ok(Ok(count)) => {
if count > 0 {
info!("removed {} expired events in: {:?}", count, start.elapsed());
}
},
_ => {
// either the task or underlying query failed
info!("there was an error cleaning up expired events: {:?}", exp_res);
}
}
}
}
};
}
});
Ok(())
}
/// Execute a query to delete all expired events
pub fn delete_expired(conn: &mut PooledConnection) -> Result<usize> {
let tx = conn.transaction()?;
let update_count = tx.execute(
"DELETE FROM event WHERE expires_at <= ?",
params![unix_time()],
)?;
tx.commit()?;
Ok(update_count)
}
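delete_expired compares expires_at against the current time; the column itself is filled at insert time from the event's NIP-40 `expiration` tag. A hedged sketch of that extraction (the relay's actual Event::expiration() may differ):

/// Return the expiration timestamp from a NIP-40 `expiration` tag, if present.
fn expiration_from_tags(tags: &[Vec<String>]) -> Option<u64> {
    tags.iter()
        .find(|t| t.len() > 1 && t[0] == "expiration")
        .and_then(|t| t[1].parse::<u64>().ok())
}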
/// Perform database WAL checkpoint on a regular basis
pub async fn db_checkpoint_task(pool: SqlitePool, frequency: Duration, checkpoint_in_progress: Arc<Mutex<u64>>) -> Result<()> {
pub async fn db_checkpoint_task(
pool: SqlitePool,
frequency: Duration,
write_in_progress: Arc<Mutex<u64>>,
checkpoint_in_progress: Arc<Mutex<u64>>,
) -> Result<()> {
// TODO: use acquire_many on the reader semaphore to stop readers from interrupting this.
tokio::task::spawn(async move {
// WAL size in pages.
let mut current_wal_size = 0;
// WAL threshold for more aggressive checkpointing (10,000 pages, or about 40MB)
let wal_threshold = 1000*10;
let wal_threshold = 1000 * 10;
// default threshold for the busy timer
let busy_wait_default = Duration::from_secs(1);
// if the WAL file is getting too big, switch to this
@ -938,6 +1253,8 @@ pub async fn db_checkpoint_task(pool: SqlitePool, frequency: Duration, checkpoin
tokio::select! {
_ = tokio::time::sleep(frequency) => {
if let Ok(mut conn) = pool.get() {
// block all other writers
let _write_guard = write_in_progress.lock().await;
let mut _guard:Option<MutexGuard<u64>> = None;
// the busy timer will block writers, so don't set
// this any higher than you want max latency for event
@ -965,6 +1282,7 @@ pub async fn db_checkpoint_task(pool: SqlitePool, frequency: Duration, checkpoin
}
#[derive(Debug)]
#[allow(dead_code)]
enum SqliteStatus {
Ok,
Busy,
@ -997,7 +1315,6 @@ pub fn checkpoint_db(conn: &mut PooledConnection) -> Result<usize> {
Ok(wal_size as usize)
}
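checkpoint_db returns the WAL size in pages so the task above can tighten its busy timer once the log grows past wal_threshold; under the hood this is SQLite's wal_checkpoint pragma. A sketch assuming TRUNCATE mode (other modes trade off how aggressively readers and writers are blocked):

use rusqlite::Connection;

/// Run a WAL checkpoint; the pragma reports (busy, log_pages, checkpointed_pages).
fn wal_checkpoint(conn: &Connection) -> rusqlite::Result<(i64, i64, i64)> {
    conn.query_row("PRAGMA wal_checkpoint(TRUNCATE);", [], |row| {
        Ok((row.get(0)?, row.get(1)?, row.get(2)?))
    })
}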
/// Produce an arbitrary list of '?' parameters.
fn repeat_vars(count: usize) -> String {
if count == 0 {
@ -1031,7 +1348,6 @@ fn log_pool_stats(name: &str, pool: &SqlitePool) {
);
}
/// Check if the pool is fully utilized
fn _pool_at_capacity(pool: &SqlitePool) -> bool {
let state: r2d2::State = pool.state();


@ -4,13 +4,13 @@ use crate::error::Result;
use crate::event::{single_char_tagname, Event};
use crate::utils::is_lower_hex;
use const_format::formatcp;
use indicatif::{ProgressBar, ProgressStyle};
use rusqlite::limits::Limit;
use rusqlite::params;
use rusqlite::Connection;
use std::cmp::Ordering;
use std::time::Instant;
use tracing::{debug, error, info};
use indicatif::{ProgressBar, ProgressStyle};
/// Startup DB Pragmas
pub const STARTUP_SQL: &str = r##"
@ -19,11 +19,11 @@ PRAGMA foreign_keys = ON;
PRAGMA journal_size_limit = 32768;
PRAGMA temp_store = 2; -- use memory, not temp files
PRAGMA main.cache_size = 20000; -- 80MB max cache size per conn
pragma mmap_size = 17179869184; -- cap mmap at 16GB
pragma mmap_size = 0; -- disable mmap (default)
"##;
/// Latest database version
pub const DB_VERSION: usize = 16;
pub const DB_VERSION: usize = 18;
/// Schema definition
const INIT_SQL: &str = formatcp!(
@ -43,6 +43,7 @@ id INTEGER PRIMARY KEY,
event_hash BLOB NOT NULL, -- 32-byte SHA256 hash of the event (the event id)
first_seen INTEGER NOT NULL, -- when the event was first seen (not authored!) (seconds since 1970)
created_at INTEGER NOT NULL, -- when the event was authored
expires_at INTEGER, -- when the event expires and may be deleted
author BLOB NOT NULL, -- author pubkey
delegated_by BLOB, -- delegator pubkey (NIP-26)
kind INTEGER NOT NULL, -- event kind
@ -61,6 +62,7 @@ CREATE INDEX IF NOT EXISTS kind_author_index ON event(kind,author);
CREATE INDEX IF NOT EXISTS kind_created_at_index ON event(kind,created_at);
CREATE INDEX IF NOT EXISTS author_created_at_index ON event(author,created_at);
CREATE INDEX IF NOT EXISTS author_kind_index ON event(author,kind);
CREATE INDEX IF NOT EXISTS event_expiration ON event(expires_at);
-- Tag Table
-- Tag values are stored as either a BLOB (if they come in as a
@ -94,6 +96,35 @@ FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CAS
);
CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name);
CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);
-- Create account table
CREATE TABLE IF NOT EXISTS account (
pubkey TEXT PRIMARY KEY,
is_admitted INTEGER NOT NULL DEFAULT 0,
balance INTEGER NOT NULL DEFAULT 0,
tos_accepted_at INTEGER
);
-- Create account index
CREATE INDEX IF NOT EXISTS user_pubkey_index ON account(pubkey);
-- Invoice table
CREATE TABLE IF NOT EXISTS invoice (
payment_hash TEXT PRIMARY KEY,
pubkey TEXT NOT NULL,
invoice TEXT NOT NULL,
amount INTEGER NOT NULL,
status TEXT CHECK ( status IN ('Paid', 'Unpaid', 'Expired' ) ) NOT NULL DEFAULT 'Unpaid',
description TEXT,
created_at INTEGER NOT NULL,
confirmed_at INTEGER,
CONSTRAINT invoice_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES account (pubkey) ON DELETE CASCADE
);
-- Create invoice index
CREATE INDEX IF NOT EXISTS invoice_pubkey_index ON invoice(pubkey);
"##,
DB_VERSION
);
@ -128,7 +159,7 @@ fn mig_init(conn: &mut PooledConnection) -> usize {
);
}
Err(err) => {
error!("update failed: {}", err);
error!("update (init) failed: {}", err);
panic!("database could not be initialized");
}
}
@ -208,6 +239,12 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<usize> {
if curr_version == 15 {
curr_version = mig_15_to_16(conn)?;
}
if curr_version == 16 {
curr_version = mig_16_to_17(conn)?;
}
if curr_version == 17 {
curr_version = mig_17_to_18(conn)?;
}
if curr_version == DB_VERSION {
info!(
@ -248,8 +285,8 @@ pub fn rebuild_tags(conn: &mut PooledConnection) -> Result<()> {
let mut stmt = tx.prepare("select id, content from event order by id;")?;
let mut tag_rows = stmt.query([])?;
while let Some(row) = tag_rows.next()? {
if (events_processed as f32)/(count as f32) > percent_done {
info!("Tag update {}% complete...", (100.0*percent_done).round());
if (events_processed as f32) / (count as f32) > percent_done {
info!("Tag update {}% complete...", (100.0 * percent_done).round());
percent_done += update_each_percent;
}
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
@ -258,7 +295,7 @@ pub fn rebuild_tags(conn: &mut PooledConnection) -> Result<()> {
let event: Event = serde_json::from_str(&event_json)?;
// look at each event, and each tag, creating new tag entries if appropriate.
for t in event.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.get(0).unwrap();
let tagname = t.first().unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
@ -288,9 +325,7 @@ pub fn rebuild_tags(conn: &mut PooledConnection) -> Result<()> {
Ok(())
}
//// Migration Scripts
// Migration Scripts
fn mig_1_to_2(conn: &mut PooledConnection) -> Result<usize> {
// only change is adding a hidden column to events.
@ -304,7 +339,7 @@ PRAGMA user_version = 2;
info!("database schema upgraded v1 -> v2");
}
Err(err) => {
error!("update failed: {}", err);
error!("update (v1->v2) failed: {}", err);
panic!("database could not be upgraded");
}
}
@ -331,7 +366,7 @@ PRAGMA user_version = 3;
info!("database schema upgraded v2 -> v3");
}
Err(err) => {
error!("update failed: {}", err);
error!("update (v2->v3) failed: {}", err);
panic!("database could not be upgraded");
}
}
@ -381,7 +416,7 @@ PRAGMA user_version = 4;
info!("database schema upgraded v3 -> v4");
}
Err(err) => {
error!("update failed: {}", err);
error!("update (v3->v4) failed: {}", err);
panic!("database could not be upgraded");
}
}
@ -400,7 +435,7 @@ PRAGMA user_version=5;
info!("database schema upgraded v4 -> v5");
}
Err(err) => {
error!("update failed: {}", err);
error!("update (v4->v5) failed: {}", err);
panic!("database could not be upgraded");
}
}
@ -426,7 +461,7 @@ fn mig_5_to_6(conn: &mut PooledConnection) -> Result<usize> {
let event: Event = serde_json::from_str(&event_json)?;
// look at each event, and each tag, creating new tag entries if appropriate.
for t in event.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.get(0).unwrap();
let tagname = t.first().unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
@ -472,7 +507,7 @@ PRAGMA user_version = 7;
info!("database schema upgraded v6 -> v7");
}
Err(err) => {
error!("update failed: {}", err);
error!("update (v6->v7) failed: {}", err);
panic!("database could not be upgraded");
}
}
@ -493,7 +528,7 @@ PRAGMA user_version = 8;
info!("database schema upgraded v7 -> v8");
}
Err(err) => {
error!("update failed: {}", err);
error!("update (v7->v8) failed: {}", err);
panic!("database could not be upgraded");
}
}
@ -513,7 +548,7 @@ PRAGMA user_version = 9;
info!("database schema upgraded v8 -> v9");
}
Err(err) => {
error!("update failed: {}", err);
error!("update (v8->v9) failed: {}", err);
panic!("database could not be upgraded");
}
}
@ -532,7 +567,7 @@ PRAGMA user_version = 10;
info!("database schema upgraded v9 -> v10");
}
Err(err) => {
error!("update failed: {}", err);
error!("update (v9->v10) failed: {}", err);
panic!("database could not be upgraded");
}
}
@ -553,7 +588,7 @@ PRAGMA user_version = 11;
info!("database schema upgraded v10 -> v11");
}
Err(err) => {
error!("update failed: {}", err);
error!("update (v10->v11) failed: {}", err);
panic!("database could not be upgraded");
}
}
@ -581,11 +616,17 @@ fn mig_11_to_12(conn: &mut PooledConnection) -> Result<usize> {
tx.execute("PRAGMA user_version = 12;", [])?;
}
tx.commit()?;
info!("database schema upgraded v11 -> v12 in {:?}", start.elapsed());
info!(
"database schema upgraded v11 -> v12 in {:?}",
start.elapsed()
);
// vacuum after large table modification
let start = Instant::now();
conn.execute("VACUUM;", [])?;
info!("vacuumed DB after hidden event cleanup in {:?}", start.elapsed());
info!(
"vacuumed DB after hidden event cleanup in {:?}",
start.elapsed()
);
Ok(12)
}
@ -602,7 +643,7 @@ PRAGMA user_version = 13;
info!("database schema upgraded v12 -> v13");
}
Err(err) => {
error!("update failed: {}", err);
error!("update (v12->v13) failed: {}", err);
panic!("database could not be upgraded");
}
}
@ -622,7 +663,7 @@ PRAGMA user_version = 14;
info!("database schema upgraded v13 -> v14");
}
Err(err) => {
error!("update failed: {}", err);
error!("update (v13->v14) failed: {}", err);
panic!("database could not be upgraded");
}
}
@ -641,7 +682,7 @@ PRAGMA user_version = 15;
info!("database schema upgraded v14 -> v15");
}
Err(err) => {
error!("update failed: {}", err);
error!("update (v14->v15) failed: {}", err);
panic!("database could not be upgraded");
}
}
@ -651,7 +692,7 @@ PRAGMA user_version = 15;
match conn.execute_batch(clear_hidden_sql) {
Ok(()) => {
info!("all hidden events removed");
},
}
Err(err) => {
error!("delete failed: {}", err);
panic!("could not remove hidden events");
@ -662,7 +703,7 @@ PRAGMA user_version = 15;
fn mig_15_to_16(conn: &mut PooledConnection) -> Result<usize> {
let count = db_event_count(conn)?;
info!("database schema needs update from 15->16 (this make take a few minutes)");
info!("database schema needs update from 15->16 (this may take a few minutes)");
let upgrade_sql = r##"
DROP TABLE tag;
CREATE TABLE tag (
@ -683,22 +724,22 @@ CREATE INDEX IF NOT EXISTS tag_covering_index ON tag(name,kind,value,created_at,
let start = Instant::now();
let tx = conn.transaction()?;
let bar = ProgressBar::new(count.try_into().unwrap())
.with_message("rebuilding tags table");
let bar = ProgressBar::new(count.try_into().unwrap()).with_message("rebuilding tags table");
bar.set_style(
ProgressStyle::with_template(
"[{elapsed_precise}] {bar:40.white/blue} {pos:>7}/{len:7} [{percent}%] {msg}",
)
.unwrap(),
.unwrap(),
);
{
tx.execute_batch(upgrade_sql)?;
let mut stmt = tx.prepare("select id, kind, created_at, content from event order by id;")?;
let mut stmt =
tx.prepare("select id, kind, created_at, content from event order by id;")?;
let mut tag_rows = stmt.query([])?;
let mut count = 0;
while let Some(row) = tag_rows.next()? {
count += 1;
if count%10==0 {
if count % 10 == 0 {
bar.inc(10);
}
let event_id: u64 = row.get(0)?;
@ -708,7 +749,7 @@ CREATE INDEX IF NOT EXISTS tag_covering_index ON tag(name,kind,value,created_at,
let event: Event = serde_json::from_str(&event_json)?;
// look at each event, and each tag, creating new tag entries if appropriate.
for t in event.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.get(0).unwrap();
let tagname = t.first().unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
@ -726,6 +767,75 @@ CREATE INDEX IF NOT EXISTS tag_covering_index ON tag(name,kind,value,created_at,
}
bar.finish();
tx.commit()?;
info!("database schema upgraded v15 -> v16 in {:?}", start.elapsed());
info!(
"database schema upgraded v15 -> v16 in {:?}",
start.elapsed()
);
Ok(16)
}
fn mig_16_to_17(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 16->17");
let upgrade_sql = r##"
ALTER TABLE event ADD COLUMN expires_at INTEGER;
CREATE INDEX IF NOT EXISTS event_expiration ON event(expires_at);
PRAGMA user_version = 17;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v16 -> v17");
}
Err(err) => {
error!("update (v16->v17) failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(17)
}
fn mig_17_to_18(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 17->18");
let upgrade_sql = r##"
-- Create invoices table
CREATE TABLE IF NOT EXISTS invoice (
payment_hash TEXT PRIMARY KEY,
pubkey TEXT NOT NULL,
invoice TEXT NOT NULL,
amount INTEGER NOT NULL,
status TEXT CHECK ( status IN ('Paid', 'Unpaid', 'Expired' ) ) NOT NULL DEFAULT 'Unpaid',
description TEXT,
created_at INTEGER NOT NULL,
confirmed_at INTEGER,
CONSTRAINT invoice_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES account (pubkey) ON DELETE CASCADE
);
-- Create invoice index
CREATE INDEX IF NOT EXISTS invoice_pubkey_index ON invoice(pubkey);
-- Create account table
CREATE TABLE IF NOT EXISTS account (
pubkey TEXT PRIMARY KEY,
is_admitted INTEGER NOT NULL DEFAULT 0,
balance INTEGER NOT NULL DEFAULT 0,
tos_accepted_at INTEGER
);
-- Create account index
CREATE INDEX IF NOT EXISTS account_pubkey_index ON account(pubkey);
pragma optimize;
PRAGMA user_version = 18;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v17 -> v18");
}
Err(err) => {
error!("update (v17->v18) failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(18)
}

File diff suppressed because it is too large


@ -45,7 +45,8 @@ pub struct ReqFilter {
impl Serialize for ReqFilter {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S:Serializer,
where
S: Serializer,
{
let mut map = serializer.serialize_map(None)?;
if let Some(ids) = &self.ids {
@ -68,8 +69,8 @@ impl Serialize for ReqFilter {
}
// serialize tags
if let Some(tags) = &self.tags {
for (k,v) in tags {
let vals:Vec<&String> = v.iter().collect();
for (k, v) in tags {
let vals: Vec<&String> = v.iter().collect();
map.serialize_entry(&format!("#{k}"), &vals)?;
}
}
@ -79,8 +80,8 @@ impl Serialize for ReqFilter {
impl<'de> Deserialize<'de> for ReqFilter {
fn deserialize<D>(deserializer: D) -> Result<ReqFilter, D::Error>
where
D: Deserializer<'de>,
where
D: Deserializer<'de>,
{
let received: Value = Deserialize::deserialize(deserializer)?;
let filter = received.as_object().ok_or_else(|| {
@ -105,15 +106,16 @@ impl<'de> Deserialize<'de> for ReqFilter {
for (key, val) in filter {
// ids
if key == "ids" {
let raw_ids: Option<Vec<String>>= Deserialize::deserialize(val).ok();
let raw_ids: Option<Vec<String>> = Deserialize::deserialize(val).ok();
if let Some(a) = raw_ids.as_ref() {
if a.contains(&empty_string) {
return Err(serde::de::Error::invalid_type(
Unexpected::Other("prefix matches must not be empty strings"),
&"a json object"));
&"a json object",
));
}
}
rf.ids =raw_ids;
rf.ids = raw_ids;
} else if key == "kinds" {
rf.kinds = Deserialize::deserialize(val).ok();
} else if key == "since" {
@ -123,12 +125,13 @@ impl<'de> Deserialize<'de> for ReqFilter {
} else if key == "limit" {
rf.limit = Deserialize::deserialize(val).ok();
} else if key == "authors" {
let raw_authors: Option<Vec<String>>= Deserialize::deserialize(val).ok();
let raw_authors: Option<Vec<String>> = Deserialize::deserialize(val).ok();
if let Some(a) = raw_authors.as_ref() {
if a.contains(&empty_string) {
return Err(serde::de::Error::invalid_type(
Unexpected::Other("prefix matches must not be empty strings"),
&"a json object"));
&"a json object",
));
}
}
rf.authors = raw_authors;
@ -181,11 +184,11 @@ impl<'de> Deserialize<'de> for Subscription {
/// Custom deserializer for subscriptions, which have a more
/// complex structure than the other message types.
fn deserialize<D>(deserializer: D) -> Result<Subscription, D::Error>
where
D: Deserializer<'de>,
where
D: Deserializer<'de>,
{
let mut v: Value = Deserialize::deserialize(deserializer)?;
// this shoud be a 3-or-more element array.
// this should be a 3-or-more element array.
// verify the first element is a String, REQ
// get the subscription from the second element.
// convert each of the remaining objects into filters
@ -232,19 +235,22 @@ impl<'de> Deserialize<'de> for Subscription {
impl Subscription {
/// Get a copy of the subscription identifier.
#[must_use] pub fn get_id(&self) -> String {
#[must_use]
pub fn get_id(&self) -> String {
self.id.clone()
}
/// Determine if any filter is requesting historical (database)
/// queries. If every filter has limit:0, we do not need to query the DB.
#[must_use] pub fn needs_historical_events(&self) -> bool {
self.filters.iter().any(|f| f.limit!=Some(0))
#[must_use]
pub fn needs_historical_events(&self) -> bool {
self.filters.iter().any(|f| f.limit != Some(0))
}
/// Determine if this subscription matches a given [`Event`]. Any
/// individual filter match is sufficient.
#[must_use] pub fn interested_in_event(&self, event: &Event) -> bool {
#[must_use]
pub fn interested_in_event(&self, event: &Event) -> bool {
for f in &self.filters {
if f.interested_in_event(event) {
return true;
@ -252,6 +258,29 @@ impl Subscription {
}
false
}
/// Determine if this subscription is a scraper query
pub fn is_scraper(&self) -> bool {
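// Score how precisely each filter narrows results: ids count double,
// while authors, kinds, and tags count once each. Any filter scoring
// below 2 marks the whole subscription as a scraper.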
for f in &self.filters {
let mut precision = 0;
if f.ids.is_some() {
precision += 2;
}
if f.authors.is_some() {
precision += 1;
}
if f.kinds.is_some() {
precision += 1;
}
if f.tags.is_some() {
precision += 1;
}
if precision < 2 {
return true;
}
}
false
}
}
fn prefix_match(prefixes: &[String], target: &str) -> bool {
@ -305,17 +334,16 @@ impl ReqFilter {
/// Check if this filter either matches, or does not care about the kind.
fn kind_match(&self, kind: u64) -> bool {
self.kinds
.as_ref()
.map_or(true, |ks| ks.contains(&kind))
self.kinds.as_ref().map_or(true, |ks| ks.contains(&kind))
}
/// Determine if all populated fields in this filter match the provided event.
#[must_use] pub fn interested_in_event(&self, event: &Event) -> bool {
#[must_use]
pub fn interested_in_event(&self, event: &Event) -> bool {
// self.id.as_ref().map(|v| v == &event.id).unwrap_or(true)
self.ids_match(event)
&& self.since.map_or(true, |t| event.created_at > t)
&& self.until.map_or(true, |t| event.created_at < t)
&& self.since.map_or(true, |t| event.created_at >= t)
&& self.until.map_or(true, |t| event.created_at <= t)
&& self.kind_match(event.kind)
&& (self.authors_match(event) || self.delegated_authors_match(event))
&& self.tag_match(event)
@ -333,7 +361,7 @@ mod tests {
let s: Subscription = serde_json::from_str(raw_json)?;
assert_eq!(s.id, "some-id");
assert_eq!(s.filters.len(), 1);
assert_eq!(s.filters.get(0).unwrap().authors, None);
assert_eq!(s.filters.first().unwrap().authors, None);
Ok(())
}
@ -397,7 +425,7 @@ mod tests {
let s: Subscription = serde_json::from_str(raw_json)?;
assert_eq!(s.id, "some-id");
assert_eq!(s.filters.len(), 1);
let first_filter = s.filters.get(0).unwrap();
let first_filter = s.filters.first().unwrap();
assert_eq!(
first_filter.authors,
Some(vec!("test-author-id".to_owned()))
@ -625,12 +653,14 @@ mod tests {
#[test]
fn serialize_filter() -> Result<()> {
let s: Subscription = serde_json::from_str(r##"["REQ","xyz",{"authors":["abc", "bcd"], "since": 10, "until": 20, "limit":100, "#e": ["foo", "bar"], "#d": ["test"]}]"##)?;
let f = s.filters.get(0);
let s: Subscription = serde_json::from_str(
r##"["REQ","xyz",{"authors":["abc", "bcd"], "since": 10, "until": 20, "limit":100, "#e": ["foo", "bar"], "#d": ["test"]}]"##,
)?;
let f = s.filters.first();
let serialized = serde_json::to_string(&f)?;
let serialized_wrapped = format!(r##"["REQ", "xyz",{}]"##, serialized);
let parsed: Subscription = serde_json::from_str(&serialized_wrapped)?;
let parsed_filter = parsed.filters.get(0);
let parsed_filter = parsed.filters.first();
if let Some(pf) = parsed_filter {
assert_eq!(pf.since, Some(10));
assert_eq!(pf.until, Some(20));
@ -640,4 +670,14 @@ mod tests {
}
Ok(())
}
#[test]
fn is_scraper() -> Result<()> {
assert!(serde_json::from_str::<Subscription>(r#"["REQ","some-id",{"kinds": [1984],"since": 123,"limit":1}]"#)?.is_scraper());
assert!(serde_json::from_str::<Subscription>(r#"["REQ","some-id",{"kinds": [1984]},{"kinds": [1984],"authors":["aaaa"]}]"#)?.is_scraper());
assert!(!serde_json::from_str::<Subscription>(r#"["REQ","some-id",{"kinds": [1984],"authors":["aaaa"]}]"#)?.is_scraper());
assert!(!serde_json::from_str::<Subscription>(r#"["REQ","some-id",{"ids": ["aaaa"]}]"#)?.is_scraper());
assert!(!serde_json::from_str::<Subscription>(r##"["REQ","some-id",{"#p": ["aaaa"],"kinds":[1,4]}]"##)?.is_scraper());
Ok(())
}
}


@ -1,9 +1,11 @@
//! Common utility functions
use bech32::FromBase32;
use std::time::SystemTime;
use url::Url;
/// Seconds since 1970.
#[must_use] pub fn unix_time() -> u64 {
#[must_use]
pub fn unix_time() -> u64 {
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.map(|x| x.as_secs())
@ -11,7 +13,8 @@ use std::time::SystemTime;
}
/// Check if a string contains only hex characters.
#[must_use] pub fn is_hex(s: &str) -> bool {
#[must_use]
pub fn is_hex(s: &str) -> bool {
s.chars().all(|x| char::is_ascii_hexdigit(&x))
}
@ -27,12 +30,19 @@ pub fn nip19_to_hex(s: &str) -> Result<String, bech32::Error> {
}
/// Check if a string contains only lower-case hex chars.
#[must_use] pub fn is_lower_hex(s: &str) -> bool {
#[must_use]
pub fn is_lower_hex(s: &str) -> bool {
s.chars().all(|x| {
(char::is_ascii_lowercase(&x) || char::is_ascii_digit(&x)) && char::is_ascii_hexdigit(&x)
})
}
pub fn host_str(url: &str) -> Option<String> {
Url::parse(url)
.ok()
.and_then(|u| u.host_str().map(|s| s.to_string()))
}
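A quick usage note for host_str, whose behavior follows directly from url::Url::parse:

assert_eq!(host_str("wss://nostr.example.com/"), Some("nostr.example.com".to_string()));
assert_eq!(host_str("not a url"), None);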
#[cfg(test)]
mod tests {
use super::*;
@ -40,15 +50,15 @@ mod tests {
#[test]
fn lower_hex() {
let hexstr = "abcd0123";
assert_eq!(is_lower_hex(hexstr), true);
assert!(is_lower_hex(hexstr));
}
#[test]
fn nip19() {
let hexkey = "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d";
let nip19key = "npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6";
assert_eq!(is_nip19(hexkey), false);
assert_eq!(is_nip19(nip19key), true);
assert!(!is_nip19(hexkey));
assert!(is_nip19(nip19key));
}
#[test]


@ -7,4 +7,4 @@ mod tests {
use clap::CommandFactory;
CLIArgs::command().debug_assert();
}
}
}


@ -103,8 +103,5 @@ fn get_available_port() -> Option<u16> {
}
pub fn port_is_available(port: u16) -> bool {
info!("checking on port {}", port);
match TcpListener::bind(("127.0.0.1", port)) {
Ok(_) => true,
Err(_) => false,
}
TcpListener::bind(("127.0.0.1", port)).is_ok()
}

tests/conn.rs (new file, 356 lines)

@ -0,0 +1,356 @@
#[cfg(test)]
mod tests {
use bitcoin_hashes::hex::ToHex;
use bitcoin_hashes::sha256;
use bitcoin_hashes::Hash;
use secp256k1::rand;
use secp256k1::{KeyPair, Secp256k1, XOnlyPublicKey};
use nostr_rs_relay::conn::ClientConn;
use nostr_rs_relay::error::Error;
use nostr_rs_relay::event::Event;
use nostr_rs_relay::utils::unix_time;
const RELAY: &str = "wss://nostr.example.com/";
#[test]
fn test_generate_auth_challenge() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let last_auth_challenge = client_conn.auth_challenge().cloned();
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_ne!(
client_conn.auth_challenge().unwrap(),
&last_auth_challenge.unwrap()
);
assert_eq!(client_conn.auth_pubkey(), None);
}
#[test]
fn test_authenticate_with_valid_event() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let challenge = client_conn.auth_challenge().unwrap();
let event = auth_event(challenge);
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Ok(())));
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), Some(&event.pubkey));
}
#[test]
fn test_fail_to_authenticate_in_invalid_state() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let event = auth_event(&"challenge".into());
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
#[test]
fn test_authenticate_when_already_authenticated() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let challenge = client_conn.auth_challenge().unwrap().clone();
let event = auth_event(&challenge);
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Ok(())));
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), Some(&event.pubkey));
let event1 = auth_event(&challenge);
let result1 = client_conn.authenticate(&event1, RELAY);
assert!(matches!(result1, Ok(())));
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), Some(&event.pubkey));
assert_ne!(client_conn.auth_pubkey(), Some(&event1.pubkey));
}
#[test]
fn test_fail_to_authenticate_with_invalid_event() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let challenge = client_conn.auth_challenge().unwrap();
let mut event = auth_event(challenge);
event.sig = event.sig.chars().rev().collect::<String>();
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
#[test]
fn test_fail_to_authenticate_with_invalid_event_kind() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let challenge = client_conn.auth_challenge().unwrap();
let event = auth_event_with_kind(challenge, 9999999999999999);
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
#[test]
fn test_fail_to_authenticate_with_expired_timestamp() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let challenge = client_conn.auth_challenge().unwrap();
let event = auth_event_with_created_at(challenge, unix_time() - 1200); // 20 minutes
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
#[test]
fn test_fail_to_authenticate_with_future_timestamp() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let challenge = client_conn.auth_challenge().unwrap();
let event = auth_event_with_created_at(challenge, unix_time() + 1200); // 20 minutes
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
#[test]
fn test_fail_to_authenticate_without_tags() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let event = auth_event_without_tags();
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
#[test]
fn test_fail_to_authenticate_without_challenge() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let event = auth_event_without_challenge();
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
#[test]
fn test_fail_to_authenticate_without_relay() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let challenge = client_conn.auth_challenge().unwrap();
let event = auth_event_without_relay(challenge);
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
#[test]
fn test_fail_to_authenticate_with_invalid_challenge() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let event = auth_event(&"invalid challenge".into());
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
#[test]
fn test_fail_to_authenticate_with_invalid_relay() {
let mut client_conn = ClientConn::new("127.0.0.1".into());
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
client_conn.generate_auth_challenge();
assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);
let challenge = client_conn.auth_challenge().unwrap();
let event = auth_event_with_relay(challenge, &"xyz".into());
let result = client_conn.authenticate(&event, RELAY);
assert!(matches!(result, Err(Error::AuthFailure)));
}
fn auth_event(challenge: &String) -> Event {
create_auth_event(Some(challenge), Some(&RELAY.into()), 22242, unix_time())
}
fn auth_event_with_kind(challenge: &String, kind: u64) -> Event {
create_auth_event(Some(challenge), Some(&RELAY.into()), kind, unix_time())
}
fn auth_event_with_created_at(challenge: &String, created_at: u64) -> Event {
create_auth_event(Some(challenge), Some(&RELAY.into()), 22242, created_at)
}
fn auth_event_without_challenge() -> Event {
create_auth_event(None, Some(&RELAY.into()), 22242, unix_time())
}
fn auth_event_without_relay(challenge: &String) -> Event {
create_auth_event(Some(challenge), None, 22242, unix_time())
}
fn auth_event_without_tags() -> Event {
create_auth_event(None, None, 22242, unix_time())
}
fn auth_event_with_relay(challenge: &String, relay: &String) -> Event {
create_auth_event(Some(challenge), Some(relay), 22242, unix_time())
}
fn create_auth_event(
challenge: Option<&String>,
relay: Option<&String>,
kind: u64,
created_at: u64,
) -> Event {
let secp = Secp256k1::new();
let key_pair = KeyPair::new(&secp, &mut rand::thread_rng());
let public_key = XOnlyPublicKey::from_keypair(&key_pair);
let mut tags: Vec<Vec<String>> = vec![];
if let Some(c) = challenge {
let tag = vec!["challenge".into(), c.into()];
tags.push(tag);
}
if let Some(r) = relay {
let tag = vec!["relay".into(), r.into()];
tags.push(tag);
}
let mut event = Event {
id: "0".to_owned(),
pubkey: public_key.to_hex(),
delegated_by: None,
created_at,
kind,
tags,
content: "".to_owned(),
sig: "0".to_owned(),
tagidx: None,
};
let c = event.to_canonical().unwrap();
let digest: sha256::Hash = sha256::Hash::hash(c.as_bytes());
let msg = secp256k1::Message::from_slice(digest.as_ref()).unwrap();
let sig = secp.sign_schnorr(&msg, &key_pair);
event.id = format!("{digest:x}");
event.sig = sig.to_hex();
event
}
}


@ -1,8 +1,10 @@
use anyhow::Result;
use futures::SinkExt;
use futures::StreamExt;
use std::thread;
use std::time::Duration;
use tokio_tungstenite::connect_async;
use tracing::info;
mod common;
#[tokio::test]
@ -45,3 +47,33 @@ async fn relay_home_page() -> Result<()> {
let _res = relay.shutdown_tx.send(());
Ok(())
}
//#[tokio::test]
// still a work in progress
async fn publish_test() -> Result<()> {
// get a relay and wait for startup
let relay = common::start_relay()?;
common::wait_for_healthy_relay(&relay).await?;
// open a non-secure websocket connection.
let (mut ws, _res) = connect_async(format!("ws://localhost:{}", relay.port)).await?;
// send a simple pre-made message
let simple_event = r#"["EVENT", {"content": "hello world","created_at": 1691239763,
"id":"f3ce6798d70e358213ebbeba4886bbdfacf1ecfd4f65ee5323ef5f404de32b86",
"kind": 1,
"pubkey": "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798",
"sig": "30ca29e8581eeee75bf838171dec818af5e6de2b74f5337de940f5cc91186534c0b20d6cf7ad1043a2c51dbd60b979447720a471d346322103c83f6cb66e4e98",
"tags": []}]"#;
ws.send(simple_event.into()).await?;
// get response from server, confirm it is an array with first element "OK"
let event_confirm = ws.next().await;
ws.close(None).await?;
info!("event confirmed: {:?}", event_confirm);
// open a new connection, and wait for some time to get the event.
let (mut sub_ws, _res) = connect_async(format!("ws://localhost:{}", relay.port)).await?;
let event_sub = r#"["REQ", "simple", {}]"#;
sub_ws.send(event_sub.into()).await?;
// read from subscription
let _ws_next = sub_ws.next().await;
let _res = relay.shutdown_tx.send(());
Ok(())
}