Compare commits


198 Commits
0.7.6 ... 0.8.5

Author SHA1 Message Date
Greg Heartsfield
692925942a build: bump version to 0.8.5 2023-02-13 17:53:33 -06:00
Greg Heartsfield
84afd4b64e refactor: whitespace 2023-02-13 17:52:00 -06:00
Greg Heartsfield
46160bb1f9 fix: correct name of gRPC configuration in toml 2023-02-13 17:30:26 -06:00
Greg Heartsfield
2fc9168a38 fix: SQL error with parameterized replaceable events 2023-02-13 17:10:42 -06:00
Greg Heartsfield
01d0d44868 build: bump version to 0.8.4 2023-02-13 09:34:30 -06:00
Greg Heartsfield
93f6337fda fix: upgrade docker image to include OpenSSL 3 2023-02-13 09:33:14 -06:00
Greg Heartsfield
f3a42712a6 build: bump version to 0.8.3 2023-02-13 08:08:28 -06:00
Greg Heartsfield
27361d064a improvement: upgrade multiple dependencies
Updating anyhow v1.0.68 -> v1.0.69
Updating axum v0.6.4 -> v0.6.6
Updating cxx v1.0.89 -> v1.0.90
Updating cxx-build v1.0.89 -> v1.0.90
Updating cxxbridge-flags v1.0.89 -> v1.0.90
Updating cxxbridge-macro v1.0.89 -> v1.0.90
Adding hermit-abi v0.3.1
Updating is-terminal v0.4.2 -> v0.4.3
Updating pest v2.5.4 -> v2.5.5
Updating pest_derive v2.5.4 -> v2.5.5
Updating pest_generator v2.5.4 -> v2.5.5
Updating pest_meta v2.5.4 -> v2.5.5
Updating proc-macro2 v1.0.50 -> v1.0.51
Updating raw-cpuid v10.6.0 -> v10.6.1
Updating rustix v0.36.7 -> v0.36.8
Updating serde_json v1.0.91 -> v1.0.93
Updating signal-hook-registry v1.4.0 -> v1.4.1
Updating thread_local v1.1.4 -> v1.1.7
Updating tinyvec_macros v0.1.0 -> v0.1.1
Updating tokio-native-tls v0.3.0 -> v0.3.1
Updating tokio-util v0.7.4 -> v0.7.7
2023-02-13 07:57:14 -06:00
Greg Heartsfield
3bafb611e5 build: install packages with sudo for github ci 2023-02-13 07:50:48 -06:00
Greg Heartsfield
b960ab70de build: add protobuf compiler to github ci workflow 2023-02-13 07:48:09 -06:00
Greg Heartsfield
15e2f097aa improvement: advise operator this upgrade may take a minute 2023-02-13 07:37:13 -06:00
Greg Heartsfield
185f9e7abb feat: improved query performance when looking for deletion events (improves event insert time) 2023-02-12 15:43:22 -06:00
Greg Heartsfield
f44dae6ac9 fix: use correct start time for logging SQL generation 2023-02-12 15:00:50 -06:00
Greg Heartsfield
abc356c17d perf(sqlite): index tags with their kind/created_at fields
This updates the DB schema to remove the distinction between hex and
non-hex tag values, for simplicity.  The space savings did not seem to
be worth the extra complexity.

The SQLite tags table is denormalized to duplicate kind/created_at to
improve the ability of tag indexes to filter data.
2023-02-12 14:33:40 -06:00
Greg Heartsfield
81f8256c37 fix: container builds support protobuf compilation 2023-02-11 14:30:42 -06:00
Greg Heartsfield
b3db2bd081 fix: protobuf compiler not needed in runtime container 2023-02-11 13:57:53 -06:00
Greg Heartsfield
d31e974d56 fix: add protobuf-compiler for Docker and CI builds 2023-02-11 13:56:15 -06:00
Greg Heartsfield
36eaf9fea5 improvement: make comments match code for nauthz example 2023-02-11 13:36:10 -06:00
Greg Heartsfield
a16c4e698a feat: gRPC authorization for events
closes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/46
2023-02-11 13:26:08 -06:00
Greg Heartsfield
e63d179424 fix: prevent loop when nip05 metadata channel closes 2023-02-11 13:26:08 -06:00
rorp
28b7b83a6e improvement: make config file location configurable via CLI args 2023-02-08 07:59:26 -06:00
Greg Heartsfield
2e42b1b86e improvement: log source IP for persisted events 2023-02-06 17:15:27 -06:00
Naoki Ikeguchi
bd07a11f50 refactor: Fix clippy warnings 2023-02-06 07:29:45 -06:00
Greg Heartsfield
bc4b45d4b8 docs: update DB maintenance for v0.8.x 2023-02-06 07:07:23 -06:00
thesimplekid
1ca5d652de format: postgres_migrations 2023-02-06 06:44:57 -06:00
thesimplekid
d7cceab8fc fix: tag table does not have a unique constraint
`cargo fmt` on the document.
2023-02-06 06:44:57 -06:00
Greg Heartsfield
2805a96e5b docs: nginx timeouts
suggested by Michael Dilger;
ref: https://snort.social/e/note15jtrt8zsrvckyv6hggmanwk43p50gvmxe30s62x9tt6x9hyruzaq6fca44
2023-02-05 17:17:13 -06:00
Greg Heartsfield
ac14a0759f docs: clarify wording around subscription limits 2023-02-03 13:08:31 -06:00
Greg Heartsfield
cdd4e5949f fix: correctly log SQL generation time 2023-02-03 10:39:41 -06:00
Greg Heartsfield
5999009779 improvement: increase connection cache size 2023-02-02 18:34:30 -06:00
Greg Heartsfield
e36c791c53 improvement: prevent spilling temp indexes to disk 2023-02-02 18:15:14 -06:00
Greg Heartsfield
d95adbcb3d build: bump version to 0.8.2 2023-02-02 16:21:45 -06:00
Greg Heartsfield
509736c56d improvement: update multiple dependencies
Updating cxx v1.0.88 -> v1.0.89
Updating cxx-build v1.0.88 -> v1.0.89
Updating cxxbridge-flags v1.0.88 -> v1.0.89
Updating cxxbridge-macro v1.0.88 -> v1.0.89
Updating heck v0.4.0 -> v0.4.1
Updating hyper v0.14.23 -> v0.14.24
Updating io-lifetimes v1.0.4 -> v1.0.5
Updating js-sys v0.3.60 -> v0.3.61
Updating parking_lot_core v0.9.6 -> v0.9.7
Updating sync_wrapper v0.1.1 -> v0.1.2
Updating wasm-bindgen v0.2.83 -> v0.2.84
Updating wasm-bindgen-backend v0.2.83 -> v0.2.84
Updating wasm-bindgen-futures v0.4.33 -> v0.4.34
Updating wasm-bindgen-macro v0.2.83 -> v0.2.84
Updating wasm-bindgen-macro-support v0.2.83 -> v0.2.84
Updating wasm-bindgen-shared v0.2.83 -> v0.2.84
Updating web-sys v0.3.60 -> v0.3.61
2023-02-02 16:12:49 -06:00
Greg Heartsfield
8004ea9b44 fix(NIP-33): only delete older events with matching 'd' tags 2023-02-02 16:09:17 -06:00
Greg Heartsfield
866c239cc9 improvement: simplify SQL queries for tags 2023-02-02 12:24:10 -06:00
Greg Heartsfield
6012b57e95 improvement: log connection details at INFO level 2023-02-02 11:55:41 -06:00
Greg Heartsfield
559541b160 build: bump version to 0.8.1 2023-02-01 18:16:08 -06:00
Greg Heartsfield
facaed7805 improvement: guidance for subscription limits 2023-02-01 18:09:30 -06:00
Greg Heartsfield
ba4fcd072a improvement: allow queries to be cancelled earlier (before SQL execution) 2023-02-01 18:09:30 -06:00
Greg Heartsfield
2b79099cfe improvement: drop slow readers more quickly 2023-02-01 18:09:30 -06:00
Greg Heartsfield
eb1d2d717d improvement: log sleeps due to full query_tx 2023-02-01 18:09:30 -06:00
Greg Heartsfield
e5e03d4378 improvement: log slow filter query time 2023-02-01 18:09:30 -06:00
Greg Heartsfield
c377b136aa improvement: prometheus metric for db connections (sqlite) 2023-02-01 18:09:30 -06:00
Greg Heartsfield
bca5614a82 perf: hold database handle through all filters when querying 2023-02-01 18:09:30 -06:00
Greg Heartsfield
f7550b4c61 improvement: more precise log message 2023-02-01 18:09:30 -06:00
Greg Heartsfield
1623bacd0d improvement(NIP-33): advertise support for parameterized replaceable events 2023-02-01 18:09:27 -06:00
Greg Heartsfield
2bbde8ad09 build: upgrade Rust to 1.67.0 2023-02-01 08:02:50 -06:00
Greg Heartsfield
a42004c30c improvement: update multiple dependencies
Updating async-trait v0.1.61 -> v0.1.64
Updating axum v0.6.2 -> v0.6.4
Updating axum-core v0.3.1 -> v0.3.2
Updating bumpalo v3.11.1 -> v3.12.0
Updating bytes v1.3.0 -> v1.4.0
Updating cc v1.0.78 -> v1.0.79
Updating clap v4.1.1 -> v4.1.4
Updating crc v3.0.0 -> v3.0.1
Updating cxx v1.0.86 -> v1.0.88
Updating cxx-build v1.0.86 -> v1.0.88
Updating cxxbridge-flags v1.0.86 -> v1.0.88
Updating cxxbridge-macro v1.0.86 -> v1.0.88
Updating either v1.8.0 -> v1.8.1
Updating futures v0.3.25 -> v0.3.26
Updating futures-channel v0.3.25 -> v0.3.26
Updating futures-core v0.3.25 -> v0.3.26
Updating futures-executor v0.3.25 -> v0.3.26
Updating futures-io v0.3.25 -> v0.3.26
Updating futures-macro v0.3.25 -> v0.3.26
Updating futures-sink v0.3.25 -> v0.3.26
Updating futures-task v0.3.25 -> v0.3.26
Updating futures-util v0.3.25 -> v0.3.26
Updating pest v2.5.3 -> v2.5.4
Updating pest_derive v2.5.3 -> v2.5.4
Updating pest_generator v2.5.3 -> v2.5.4
Updating pest_meta v2.5.3 -> v2.5.4
Updating proc-macro2 v1.0.49 -> v1.0.50
Updating rustix v0.36.6 -> v0.36.7
Updating security-framework v2.7.0 -> v2.8.2
Updating security-framework-sys v2.6.1 -> v2.8.0
Updating tokio v1.24.1 -> v1.25.0
Updating toml v0.5.10 -> v0.5.11
Updating unicode-bidi v0.3.8 -> v0.3.10
Updating unicode-segmentation v1.10.0 -> v1.10.1
Updating uuid v1.2.2 -> v1.3.0
2023-02-01 07:54:21 -06:00
Greg Heartsfield
9dd97908cf build: bump version to 0.8.0 2023-02-01 07:52:24 -06:00
Greg Heartsfield
ab749e9cf0 improvement: log mixed string/blob tag queries 2023-02-01 07:49:46 -06:00
Greg Heartsfield
1820e9c689 perf: separate out blob and string tag queries 2023-02-01 07:13:29 -06:00
Greg Heartsfield
2d3a35fe30 perf: force event hash index if filter uses ids 2023-02-01 06:46:35 -06:00
Greg Heartsfield
9c77b06f79 improvement: dedupe filters in a REQ 2023-01-31 18:09:43 -06:00
Greg Heartsfield
c8e8b71b91 fix: use accurate timer for slow queries, and use 250ms as cutoff 2023-01-31 18:09:43 -06:00
Greg Heartsfield
6d57adef73 improvement: log filter in a reusable format for slow queries 2023-01-31 18:09:43 -06:00
Greg Heartsfield
111eb4a10c perf: prevent sqlite readers from capturing worker thread pool and impacting writer latency 2023-01-31 18:09:23 -06:00
Greg Heartsfield
214f152c5d improvement: provide reason for abort in prometheus metric 2023-01-30 18:40:47 -06:00
Greg Heartsfield
3fcaf97a15 improvement: move sqlite connection acquisition into blocking thread 2023-01-30 18:02:40 -06:00
Greg Heartsfield
cec501b37f improvement: start timing for each new filter execution 2023-01-30 18:02:40 -06:00
Greg Heartsfield
2557c7f69c improvement: run filters as separate queries, to reduce complexity on SQLite query planner 2023-01-30 18:02:40 -06:00
Greg Heartsfield
3979a94726 improvement: do not force query to use index when limit is specified 2023-01-30 18:02:40 -06:00
Greg Heartsfield
71bdbfb425 improvement: query and exit early for events that get immediately replaced 2023-01-30 18:02:40 -06:00
Greg Heartsfield
b6798f96b6 improvement: add prometheus metrics, renaming others 2023-01-30 18:02:28 -06:00
w3irdrobot
c1152ce430 improvement(NIP-19): identify and parse NIP-19 addresses 2023-01-29 18:55:30 -06:00
thesimplekid
6f1a4e7d76 fix: postgres create index before column exists 2023-01-29 18:32:42 -06:00
Greg Heartsfield
1804bee912 feat(NIP-33): parameterized replaceable events for postgres 2023-01-29 18:30:08 -06:00
Greg Heartsfield
34db91940c improvement: prometheus metrics for aborted queries 2023-01-28 16:05:58 -06:00
Greg Heartsfield
0859e535ed improvement: remove origin label from connections metric
The origin header is controlled by clients, and there is no expiration
of any values that appear.  We would need to whitelist a set of known
origins in order to track this without giving someone the ability to
exhaust memory.
2023-01-28 15:46:26 -06:00
Greg Heartsfield
bdd4e43df4 improvement: show errors when writing new sqlite db
Build the sqlite writer pool first, which will provide a better error
message in the event the database is not writeable or readable.
2023-01-28 14:02:20 -06:00
Greg Heartsfield
dfa6985f44 docs: postgresql and NIP-33 added to README 2023-01-27 20:25:24 -06:00
Greg Heartsfield
57e1b53c13 feat: postgres migration schema v2
This primarily deals with correctly handling tag values.
2023-01-27 20:13:47 -06:00
Greg Heartsfield
53f83aa923 improvement: delete, do not hide, replaceable events 2023-01-24 08:04:42 -06:00
Greg Heartsfield
34a8f99d61 build: bump release to RC 1 2023-01-24 08:04:42 -06:00
Greg Heartsfield
c8f7420334 feat(NIP-33): parameterized replaceable events 2023-01-24 08:04:42 -06:00
Greg Heartsfield
e2869e8fad fix(NIP-16): do not replace events unless they are newer 2023-01-24 08:04:42 -06:00
Greg Heartsfield
5c07b2eca5 refactor: event is_ephemeral method 2023-01-24 08:04:42 -06:00
Greg Heartsfield
25752abe6b fix: run postgres migration on startup 2023-01-24 08:04:37 -06:00
Kieran
16f6e974c8 feat: add support for PostgreSQL as a backend repository 2023-01-22 16:26:54 -06:00
Rasmus Schlunsen
744d467a28 build: add github CI and badge 2023-01-22 15:50:32 -06:00
Greg Heartsfield
b094fbcabd fix: integration tests working 2023-01-22 11:10:21 -06:00
Kieran
4121c872bc feat: prometheus metrics
Prometheus metrics exposed at /metrics
2023-01-22 11:08:12 -06:00
Greg Heartsfield
6489e685ab refactor: reformat and remove tabs 2023-01-22 10:06:44 -06:00
Greg Heartsfield
6800c2e39d improvement: add NostrRepo trait, with sqlite implementation
This is inspired by the work of
v0l (https://github.com/v0l/nostr-rs-relay/).

A new trait abstracts the storage layer with an async API.  Rusqlite
is still used with worker threads, but this allows for Postgresql or
other backends to be used.

There may be bugs, this has not been rigorously tested.
2023-01-22 09:49:49 -06:00
Greg Heartsfield
e996d4c009 improvement: default to having a event creation rate limit (5/sec) 2023-01-20 11:10:43 -06:00
Paul Rollo
2331c881d7 docs: typo in database-maintenance.md
Add a missing `"`
2023-01-20 10:49:03 -06:00
Greg Heartsfield
585fdd3884 fix: use data_dir from config.toml if present
fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/64
2023-01-16 17:21:12 -06:00
Greg Heartsfield
cf3e67500f build: bump version to 0.7.17 2023-01-15 15:48:39 -06:00
Greg Heartsfield
1d19442cfd improvement: upgrade multiple dependencies
Updating crates.io index
Updating async-trait v0.1.60 -> v0.1.61
Updating axum v0.6.1 -> v0.6.2
Updating axum-core v0.3.0 -> v0.3.1
Updating clap v4.0.32 -> v4.1.1
Updating clap_derive v4.0.21 -> v4.1.0
Updating clap_lex v0.3.0 -> v0.3.1
Updating cxx v1.0.85 -> v1.0.86
Updating cxx-build v1.0.85 -> v1.0.86
Updating cxxbridge-flags v1.0.85 -> v1.0.86
Updating cxxbridge-macro v1.0.85 -> v1.0.86
Updating io-lifetimes v1.0.3 -> v1.0.4
Updating nom v7.1.2 -> v7.1.3
Updating parking_lot_core v0.9.5 -> v0.9.6
Updating pest v2.5.2 -> v2.5.3
Updating pest_derive v2.5.2 -> v2.5.3
Updating pest_generator v2.5.2 -> v2.5.3
Updating pest_meta v2.5.2 -> v2.5.3
Updating prost v0.11.5 -> v0.11.6
Updating prost-derive v0.11.5 -> v0.11.6
Updating prost-types v0.11.5 -> v0.11.6
Updating regex v1.7.0 -> v1.7.1
Updating schannel v0.1.20 -> v0.1.21
Removing sha1 v0.10.5
Adding sha2 v0.10.6
Updating termcolor v1.1.3 -> v1.2.0
Updating tokio v1.23.1 -> v1.24.1
Updating try-lock v0.2.3 -> v0.2.4
Removing windows-sys v0.36.1
Updating windows_aarch64_gnullvm v0.42.0 -> v0.42.1
Removing windows_aarch64_msvc v0.36.1
Removing windows_aarch64_msvc v0.42.0
Adding windows_aarch64_msvc v0.42.1
Removing windows_i686_gnu v0.36.1
Removing windows_i686_gnu v0.42.0
Adding windows_i686_gnu v0.42.1
Removing windows_i686_msvc v0.36.1
Removing windows_i686_msvc v0.42.0
Adding windows_i686_msvc v0.42.1
Removing windows_x86_64_gnu v0.36.1
Removing windows_x86_64_gnu v0.42.0
Adding windows_x86_64_gnu v0.42.1
Updating windows_x86_64_gnullvm v0.42.0 -> v0.42.1
Removing windows_x86_64_msvc v0.36.1
Removing windows_x86_64_msvc v0.42.0
Adding windows_x86_64_msvc v0.42.1
2023-01-15 15:46:33 -06:00
Greg Heartsfield
13cc24b5cd improvement: log blacklisted events 2023-01-15 15:42:27 -06:00
Greg Heartsfield
f543957b34 improvement: clear out hidden events during schema upgrade 2023-01-15 15:27:41 -06:00
Greg Heartsfield
7021f102e8 improvement: delete replaceable events 2023-01-15 15:13:10 -06:00
Greg Heartsfield
fddbf321bc perf: add indexes and force their use (authors) 2023-01-15 10:52:49 -06:00
Greg Heartsfield
3e7f2e21df perf: force authors index to be used if possible 2023-01-15 10:23:46 -06:00
Greg Heartsfield
9d9c6c78d1 improvement: refuse to insert events that would automatically be hidden 2023-01-15 10:01:01 -06:00
Greg Heartsfield
703b2efe6e refactor: replaceable check in event 2023-01-15 09:18:53 -06:00
Greg Heartsfield
0db6487ce3 fix: allow tokio tracing to be enabled
fixes https://github.com/scsibug/nostr-rs-relay/issues/48
2023-01-14 09:47:23 -06:00
Rasmus Schlunsen
ba987d3212 docs: update example nginx configuration to ensure A+ rating
config from https://www.ssllabs.com/ssltest/
2023-01-14 09:33:40 -06:00
Rasmus Schlunsen
73f4f60cc7 improvement: use clap for command line args 2023-01-14 09:22:11 -06:00
Greg Heartsfield
d06d227ebe improvement: lower REQ logging and note possible truncation 2023-01-11 16:56:40 -06:00
Greg Heartsfield
3519488c4e improvement: lower logging for failed REQ parses 2023-01-10 07:41:49 -06:00
Greg Heartsfield
fbd3315110 improvement: log REQ messages at debug level 2023-01-09 22:12:20 -06:00
Greg Heartsfield
3d3d1bde53 refactor: clippy suggestions 2023-01-09 22:12:04 -06:00
Greg Heartsfield
ed336111bb improvement: alert before long-running migration 2023-01-09 22:11:25 -06:00
Greg Heartsfield
8aed572989 docs: add link to relay setup 2023-01-09 21:33:59 -06:00
Greg Heartsfield
62e8da689d fix: do not force kind_created_at_index when there are tags 2023-01-06 12:57:48 -06:00
Greg Heartsfield
807d1aa384 improvement: log index names used 2023-01-06 12:50:52 -06:00
Greg Heartsfield
66a55b55b9 perf: new index, manually selected when appropriate 2023-01-06 12:17:30 -06:00
Greg Heartsfield
76c77c3e56 feat: bulk loading script for importing events 2023-01-06 12:16:19 -06:00
Greg Heartsfield
50daab8a6f refactor: make a standalone re-tagging function 2023-01-06 06:57:56 -06:00
Greg Heartsfield
ffd4e6f997 build: bump version to 0.7.16 2023-01-04 17:28:05 -06:00
Greg Heartsfield
bbd716963e improvement: update multiple dependencies
Updating anyhow v1.0.67 -> v1.0.68
Updating cxx v1.0.84 -> v1.0.85
Updating cxx-build v1.0.84 -> v1.0.85
Updating cxxbridge-flags v1.0.84 -> v1.0.85
Updating cxxbridge-macro v1.0.84 -> v1.0.85
Updating hermit-abi v0.1.19 -> v0.2.6
Updating libc v0.2.138 -> v0.2.139
Updating nom v7.1.1 -> v7.1.2
Updating num_cpus v1.14.0 -> v1.15.0
Updating once_cell v1.16.0 -> v1.17.0
Updating openssl v0.10.44 -> v0.10.45
Updating openssl-sys v0.9.79 -> v0.9.80
Updating pest v2.5.1 -> v2.5.2
Updating pest_derive v2.5.1 -> v2.5.2
Updating pest_generator v2.5.1 -> v2.5.2
Updating pest_meta v2.5.1 -> v2.5.2
Updating proc-macro2 v1.0.48 -> v1.0.49
Updating prost v0.11.3 -> v0.11.5
Updating prost-derive v0.11.2 -> v0.11.5
Updating prost-types v0.11.2 -> v0.11.5
Updating quote v1.0.22 -> v1.0.23
Updating serde v1.0.151 -> v1.0.152
Updating serde_derive v1.0.151 -> v1.0.152
Updating serde_json v1.0.90 -> v1.0.91
Updating syn v1.0.106 -> v1.0.107
Updating tokio v1.23.0 -> v1.23.1
2023-01-04 17:26:22 -06:00
Greg Heartsfield
ca95e8cf22 docs(NIP-26): reflect NIP-26 being disabled in README 2023-01-04 16:54:52 -06:00
Greg Heartsfield
e9d2a2cbd0 perf(NIP-26): temporarily disable NIP-26 delegated events 2023-01-04 16:51:22 -06:00
Greg Heartsfield
39a945b493 perf: separate author/delegated_by queries, minor improvement 2023-01-04 16:51:17 -06:00
Greg Heartsfield
9a84dc19e9 perf: author/kind index added (schema v13) 2023-01-04 16:51:02 -06:00
Greg Heartsfield
20c4bb42eb fix: correct log message 2023-01-03 21:24:46 -06:00
JesterHodl
0e519f6b77 feat: add --help and --version flags
fixes: https://github.com/scsibug/nostr-rs-relay/issues/42
2023-01-03 17:39:21 -06:00
Greg Heartsfield
3dd0f2c9c6 fix: do not run auto_vacuum on read-only connections 2023-01-03 17:32:55 -06:00
Greg Heartsfield
b7c8737166 improvement: enable auto_vacuum on database creation 2023-01-03 06:22:43 -06:00
Greg Heartsfield
c0b112c094 improvement: enable auto_vacuum on connections 2023-01-03 06:22:04 -06:00
Greg Heartsfield
cb283ac316 fix: ensure that replaceable events are handled correctly regardless of order received 2023-01-02 17:18:11 -06:00
Greg Heartsfield
2c6ac69bfd docs: remove incorrect comment 2023-01-02 15:41:17 -06:00
Greg Heartsfield
d929ae2752 improvement: define websocket send queue (unlimited->1024) 2023-01-02 15:39:28 -06:00
Greg Heartsfield
14fe9f9ee1 improvement: remove pauses for backups, likely not needed w/ WAL compaction 2023-01-02 15:38:30 -06:00
0xtr
7774db8c47 feat: add event kind blacklist
Adds a list to the config where you can specify which event kinds to blacklist.
The blacklist check will run right after verifying that the pubkey is allowed
to post events to the relay.
2022-12-27 17:10:34 -06:00
Greg Heartsfield
104ef2b9e1 build: bump version to 0.7.15 2022-12-27 17:04:48 -06:00
Greg Heartsfield
c06139ec99 docs: start of database maintenance tips 2022-12-27 17:00:14 -06:00
Greg Heartsfield
19ec89593d improvement: drop queries that are running during a checkpoint 2022-12-27 15:24:10 -06:00
Greg Heartsfield
27902bc5f4 improvement: move reader mutex closer to DB connection acquisition 2022-12-27 10:28:56 -06:00
Greg Heartsfield
d2adddaee4 improvement: extend allowed wal_checkpoint timeout to 10 sec 2022-12-27 10:13:14 -06:00
Greg Heartsfield
b23b3ce8ec improvement: block new readers when WAL is large 2022-12-27 09:48:07 -06:00
Greg Heartsfield
5f9fe1ce59 improvement: do not send realtime only filters to the DB (limit:0) 2022-12-26 12:20:36 -06:00
Greg Heartsfield
6a8c4ed1b5 build: bump version to 0.7.14 2022-12-26 11:26:48 -06:00
Greg Heartsfield
966c853700 docs: non-docker quick start 2022-12-26 10:34:09 -06:00
Greg Heartsfield
65fd0ed08b feat: increase wal_checkpoint time when WAL is large 2022-12-26 10:03:51 -06:00
Greg Heartsfield
0b51675b38 improvement: change suggestion and default for max sqlite DB readers 2022-12-25 11:17:08 -06:00
Greg Heartsfield
2e22334631 refactor: formatting 2022-12-25 11:06:30 -06:00
Greg Heartsfield
cb2ac4bf0f improvement: give threads unique names 2022-12-25 10:47:32 -06:00
Greg Heartsfield
38dc7789dc improvement: cleaner slow query logs 2022-12-25 10:47:32 -06:00
Greg Heartsfield
ce0e00ffb3 feat: log reader DB pool stats every minute 2022-12-25 10:47:32 -06:00
Greg Heartsfield
3e4ae4aeec feat: cache prepared statements and trace expanded SQL queries 2022-12-25 10:47:32 -06:00
Greg Heartsfield
c6a8807485 improvement: send error on empty-string prefix author/id searches 2022-12-25 10:47:32 -06:00
Greg Heartsfield
8137b6211c refactor: clippy suggestions 2022-12-24 10:29:47 -06:00
Greg Heartsfield
29effaae23 build: remove pre-commit rustfmt check 2022-12-24 10:29:30 -06:00
Greg Heartsfield
e5074f2e46 feat(NIP-28): replaceable kind 41 channel metadata events 2022-12-24 10:14:43 -06:00
Blake Jakopovic
4fd7643907 feat: change pub(crate) to pub for use as a library 2022-12-23 07:14:58 -06:00
Greg Heartsfield
1e1ec69175 build: remove unnecessary dockerfile mod script 2022-12-23 06:52:09 -06:00
benthecarman
e08647867c refactor: remove code duplication for simple_event 2022-12-23 06:39:50 -06:00
Greg Heartsfield
ae0f7171ed build: remove digest-locked docker base images 2022-12-23 06:30:59 -06:00
Greg Heartsfield
4f1a912f36 feat: log origin header from websocket requests
fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/33
2022-12-22 16:55:53 -06:00
Greg Heartsfield
95748647f0 build: bump version to 0.7.13 2022-12-22 16:27:34 -06:00
Greg Heartsfield
25480e837f fix: do not block writers for more than 1 second during checkpoints 2022-12-22 16:10:49 -06:00
Greg Heartsfield
b80b54cd9d improvement: reduce logging, especially for database pool size 2022-12-22 15:47:33 -06:00
Greg Heartsfield
8ea732cbe5 feat: perform regular database maintenance (60sec), without blocking main writer thread 2022-12-22 15:16:21 -06:00
Greg Heartsfield
0f68c4e5c2 refactor: formatting 2022-12-22 15:15:45 -06:00
Greg Heartsfield
dab2cd5792 wip: future changes to rustfmt 2022-12-22 15:13:54 -06:00
Greg Heartsfield
f411aa6fc2 fix: do not re-verify NIP-05 entries where metadata was deleted 2022-12-22 13:01:48 -06:00
Greg Heartsfield
d31bbda087 improvement: reduce lifetime of database connections 2022-12-22 13:01:12 -06:00
Greg Heartsfield
5917bc53b2 improvement: run maintenance every 60 seconds instead of by event count 2022-12-22 11:40:17 -06:00
Greg Heartsfield
91177c61a1 improvement: log reason for new event creation from nip05 2022-12-22 10:48:30 -06:00
Greg Heartsfield
53c2a8051c improvement: reduce logging 2022-12-22 10:29:27 -06:00
Greg Heartsfield
168cf513ac feat: perform full checkpoints and truncate WAL every 2k events 2022-12-22 10:11:05 -06:00
Greg Heartsfield
ea204761c9 fix: do not show slow queries more than once per sub 2022-12-20 15:41:50 -06:00
Greg Heartsfield
c270ae1434 improvement: reduce event count for db writer pauses 2022-12-20 15:25:24 -06:00
Greg Heartsfield
64bd983cb6 perf: every 5000 persisted events, pause for 500ms for backups
I have observed backups running for a very long time under heavy load,
this introduces some artificial delay to give the online backup enough
time to make progress.
2022-12-20 15:05:04 -06:00
Greg Heartsfield
1c153bc784 perf: shed DB query load when queue gets large 2022-12-20 13:23:21 -06:00
Greg Heartsfield
dc11d9a619 improvement: explicitly rollback transaction on duplicate event 2022-12-20 13:23:04 -06:00
Greg Heartsfield
cd1557787b improvement: log write pool 2022-12-20 13:21:57 -06:00
Greg Heartsfield
86bb7aeb9a improvement: function to check pool capacity 2022-12-20 10:07:01 -06:00
Greg Heartsfield
ce37fc1a2d build: bump version to 0.7.12 2022-12-19 14:50:42 -06:00
Greg Heartsfield
2cfd384339 perf: drop db handles that are not quickly read 2022-12-19 00:18:39 -06:00
Greg Heartsfield
8c013107f9 perf: increase upper bound for sqlite mmap 2022-12-18 23:19:43 -06:00
Greg Heartsfield
64a4466d30 perf: backing down on max_blocking_threads 2022-12-18 23:14:41 -06:00
Greg Heartsfield
1596c23eb4 perf: increase blocking threads now that contention is reduced 2022-12-18 22:46:32 -06:00
Greg Heartsfield
129badd4e1 perf: reduce per thread mmap allocation for DB 2022-12-18 22:45:32 -06:00
Greg Heartsfield
6f7c080180 improvement: reduce number of writer blocking threads from 4->2 2022-12-18 22:32:31 -06:00
Greg Heartsfield
af92561ef6 perf: remove shared cache mode (experiment) 2022-12-18 22:15:50 -06:00
Greg Heartsfield
d833a3e40d perf: reduce logging 2022-12-18 22:11:46 -06:00
Greg Heartsfield
462eb46642 build: bump version to 0.7.11 2022-12-18 20:52:01 -06:00
Greg Heartsfield
cf144d503d perf: reduce logging for slow queries 2022-12-18 20:47:11 -06:00
Greg Heartsfield
fb8375aef2 build: bump version to 0.7.10 2022-12-18 13:46:18 -06:00
Greg Heartsfield
88ac31b549 perf: increase channel size for DB communication 2022-12-18 13:44:28 -06:00
Greg Heartsfield
677b7d39e9 improvement: log slow requests that return zero results 2022-12-18 13:42:31 -06:00
Greg Heartsfield
b24d2f9aaa perf: set default blocking threads to lower value 2022-12-18 12:20:57 -06:00
Greg Heartsfield
7a3899d852 build: bump version to 0.7.9 2022-12-18 09:21:07 -06:00
Greg Heartsfield
818108b793 improvement: upgrade multiple dependencies
Updating anyhow v1.0.66 -> v1.0.67
Updating async-trait v0.1.59 -> v0.1.60
Updating cxx v1.0.83 -> v1.0.84
Updating cxx-build v1.0.83 -> v1.0.84
Updating cxxbridge-flags v1.0.83 -> v1.0.84
Updating cxxbridge-macro v1.0.83 -> v1.0.84
Updating itoa v1.0.4 -> v1.0.5
Updating link-cplusplus v1.0.7 -> v1.0.8
Updating proc-macro2 v1.0.47 -> v1.0.48
Updating quote v1.0.21 -> v1.0.22
Updating rustversion v1.0.9 -> v1.0.11
Updating ryu v1.0.11 -> v1.0.12
Updating scratch v1.0.2 -> v1.0.3
Updating serde v1.0.150 -> v1.0.151
Updating serde_derive v1.0.150 -> v1.0.151
Updating serde_json v1.0.89 -> v1.0.90
Updating syn v1.0.105 -> v1.0.106
Updating thiserror v1.0.37 -> v1.0.38
Updating thiserror-impl v1.0.37 -> v1.0.38
Updating unicode-ident v1.0.5 -> v1.0.6
2022-12-18 09:16:09 -06:00
Greg Heartsfield
d10348f7e1 feat: configurable blocking threads 2022-12-18 09:14:04 -06:00
Greg Heartsfield
8598e443d8 wip: add configuration for future feature (client concurrent db limits) 2022-12-17 23:19:48 -06:00
Greg Heartsfield
43222d44e5 feat: perform optimization after seeing many events 2022-12-17 23:18:54 -06:00
Greg Heartsfield
7c1516c4fb perf: add index for tags 2022-12-17 23:17:53 -06:00
Greg Heartsfield
0c72053a49 perf: increase mmap size to 1GB 2022-12-17 23:17:16 -06:00
Greg Heartsfield
3f32ff67ab improvement: minor logging 2022-12-17 23:11:14 -06:00
Greg Heartsfield
0b9778d6ca refactor: simplify tracking of subscriptions 2022-12-17 20:46:58 -06:00
Greg Heartsfield
9be04120c7 build: bump version to 0.7.8 2022-12-17 12:01:43 -06:00
Greg Heartsfield
cc06167e06 perf: add composite index for tag table 2022-12-17 12:01:20 -06:00
Greg Heartsfield
b6e33f044f improvement: limit db connection max lifetime 2022-12-17 10:47:35 -06:00
Greg Heartsfield
1b2c6f9fca build: bump version to 0.7.7 2022-12-17 10:09:44 -06:00
Greg Heartsfield
0d8d39ad22 feat: add rate limiting setting for subscription creation 2022-12-17 09:27:29 -06:00
44 changed files with 6675 additions and 1559 deletions


@@ -7,6 +7,7 @@ environment:
packages:
- cargo
- sqlite-devel
- protobuf-compiler
sources:
- https://git.sr.ht/~gheartsfield/nostr-rs-relay/
shell: false

39 .github/workflows/ci.yml vendored Normal file

@@ -0,0 +1,39 @@
name: Test and build
on:
push:
branches:
- master
jobs:
test_nostr-rs-relay:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Update local toolchain
run: |
sudo apt-get install -y protobuf-compiler
rustup update
rustup component add clippy
rustup install nightly
- name: Toolchain info
run: |
cargo --version --verbose
rustc --version
cargo clippy --version
# - name: Lint
# run: |
# cargo fmt -- --check
# cargo clippy -- -D warnings
- name: Test
run: |
cargo check
cargo test --all
- name: Build
run: |
cargo build --release --locked


@@ -11,6 +11,6 @@ repos:
- repo: https://github.com/doublify/pre-commit-rust
rev: v1.0
hooks:
- id: fmt
# - id: fmt
- id: cargo-check
- id: clippy

1439 Cargo.lock generated
File diff suppressed because it is too large.


@@ -1,6 +1,6 @@
[package]
name = "nostr-rs-relay"
version = "0.7.6"
version = "0.8.5"
edition = "2021"
authors = ["Greg Heartsfield <scsibug@imap.cc>"]
description = "A relay implementation for the Nostr protocol"
@@ -12,9 +12,12 @@ keywords = ["nostr", "server"]
categories = ["network-programming", "web-programming"]
[dependencies]
clap = { version = "4.0.32", features = ["env", "default", "derive"]}
tracing = "0.1.36"
tracing-subscriber = "0.2.0"
tokio = { version = "1", features = ["full", "tracing", "signal"] }
prost = "0.11"
tonic = "0.8.3"
console-subscriber = "0.1.8"
futures = "0.3"
futures-util = "0.3"
@@ -28,7 +31,7 @@ secp256k1 = {version = "0.21", features = ["rand", "rand-std", "serde", "bitcoin
serde = { version = "1.0", features = ["derive"] }
serde_json = {version = "1.0", features = ["preserve_order"]}
hex = "0.4"
rusqlite = { version = "0.26", features = ["limits","bundled"]}
rusqlite = { version = "0.26", features = ["limits","bundled","modern_sqlite", "trace"]}
r2d2 = "0.8"
r2d2_sqlite = "0.19"
lazy_static = "1.4"
@@ -41,6 +44,16 @@ parse_duration = "2"
rand = "0.8"
const_format = "0.2.28"
regex = "1"
async-trait = "0.1.60"
async-std = "1.12.0"
sqlx = { version ="0.6.2", features=["runtime-tokio-rustls", "postgres", "chrono"]}
chrono = "0.4.23"
prometheus = "0.13.3"
indicatif = "0.17.3"
bech32 = "0.9.1"
[dev-dependencies]
anyhow = "1"
[build-dependencies]
tonic-build = { version="0.8.3", features = ["prost"] }


@@ -1,5 +1,7 @@
FROM docker.io/library/rust:1.66.0@sha256:359949280cebefe93ccb33089fe25111a3aadfe99eac4b6cbe8ec3e1b571dacb as builder
FROM docker.io/library/rust:1-bookworm as builder
RUN apt-get update \
&& apt-get install -y cmake protobuf-compiler \
&& rm -rf /var/lib/apt/lists/*
RUN USER=root cargo install cargo-auditable
RUN USER=root cargo new --bin nostr-rs-relay
WORKDIR ./nostr-rs-relay
@@ -12,12 +14,14 @@ RUN rm src/*.rs
# copy project source code
COPY ./src ./src
COPY ./proto ./proto
COPY ./build.rs ./build.rs
# build auditable release using locked deps
RUN rm ./target/release/deps/nostr*relay*
RUN cargo auditable build --release --locked
FROM docker.io/library/debian:bullseye-20221205-slim@sha256:25f10b4f1ded5341a3ca0a30290ff3cd5639415f0c5a2222d5e7d5dd72952aa1
FROM docker.io/library/debian:bookworm-slim
ARG APP=/usr/src/app
ARG APP_DATA=/usr/src/app/db


@@ -2,7 +2,8 @@
This is a [nostr](https://github.com/nostr-protocol/nostr) relay,
written in Rust. It currently supports the entire relay protocol, and
persists data with SQLite.
persists data with SQLite. There is experimental support for
PostgreSQL.
The project master repository is available on
[sourcehut](https://sr.ht/~gheartsfield/nostr-rs-relay/), and is
@@ -10,6 +11,9 @@ mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay).
[![builds.sr.ht status](https://builds.sr.ht/~gheartsfield/nostr-rs-relay/commits/master.svg)](https://builds.sr.ht/~gheartsfield/nostr-rs-relay/commits/master?)
![Github CI](https://github.com/schlunsen/nostr-rs-relay/actions/workflows/ci.yml/badge.svg)
## Features
[NIPs](https://github.com/nostr-protocol/nips) with a relay-specific implementation are listed here.
@@ -28,7 +32,9 @@ mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay).
- [x] NIP-16: [Event Treatment](https://github.com/nostr-protocol/nips/blob/master/16.md)
- [x] NIP-20: [Command Results](https://github.com/nostr-protocol/nips/blob/master/20.md)
- [x] NIP-22: [Event `created_at` limits](https://github.com/nostr-protocol/nips/blob/master/22.md) (_future-dated events only_)
- [x] NIP-26: [Event Delegation](https://github.com/nostr-protocol/nips/blob/master/26.md)
- [ ] NIP-26: [Event Delegation](https://github.com/nostr-protocol/nips/blob/master/26.md) (_implemented, but currently disabled_)
- [x] NIP-28: [Public Chat](https://github.com/nostr-protocol/nips/blob/master/28.md)
- [x] NIP-33: [Parameterized Replaceable Events](https://github.com/nostr-protocol/nips/blob/master/33.md)
## Quick Start
@@ -81,6 +87,38 @@ Text Note [81cf...2652] from 296a...9b92 5 seconds ago
A pre-built container is also available on DockerHub:
https://hub.docker.com/r/scsibug/nostr-rs-relay
## Build and Run (without Docker)
Building `nostr-rs-relay` requires an installation of Cargo & Rust: https://www.rust-lang.org/tools/install
Clone this repository, and then build a release version of the relay:
```console
$ git clone -q https://git.sr.ht/\~gheartsfield/nostr-rs-relay
$ cd nostr-rs-relay
$ cargo build -q -r
```
The relay executable is now located in
`target/release/nostr-rs-relay`. In order to run it with logging
enabled, execute it with the `RUST_LOG` variable set:
```console
$ RUST_LOG=warn,nostr_rs_relay=info ./target/release/nostr-rs-relay
Dec 26 10:31:56.455 INFO nostr_rs_relay: Starting up from main
Dec 26 10:31:56.464 INFO nostr_rs_relay::server: listening on: 0.0.0.0:8080
Dec 26 10:31:56.466 INFO nostr_rs_relay::server: db writer created
Dec 26 10:31:56.466 INFO nostr_rs_relay::db: Built a connection pool "event writer" (min=1, max=2)
Dec 26 10:31:56.466 INFO nostr_rs_relay::db: opened database "./nostr.db" for writing
Dec 26 10:31:56.466 INFO nostr_rs_relay::schema: DB version = 11
Dec 26 10:31:56.467 INFO nostr_rs_relay::db: Built a connection pool "maintenance writer" (min=1, max=2)
Dec 26 10:31:56.467 INFO nostr_rs_relay::server: control message listener started
Dec 26 10:31:56.468 INFO nostr_rs_relay::db: Built a connection pool "client query" (min=4, max=8)
```
You now have a running relay on port `8080`. Use a `nostr` client or
`websocat` to connect and send/query for events.
## Configuration
The sample [`config.toml`](config.toml) file demonstrates the
@@ -115,3 +153,8 @@ To chat about `nostr-rs-relay` on `nostr` itself; visit our channel on [anigma](
License
---
This project is MIT licensed.
External Documentation and Links
---
* [BlockChainCaffe's Nostr Relay Setup Guide](https://github.com/BlockChainCaffe/Nostr-Relay-Setup-Guide)

4 build.rs Normal file

@@ -0,0 +1,4 @@
fn main() -> Result<(), Box<dyn std::error::Error>> {
tonic_build::compile_protos("proto/nauthz.proto")?;
Ok(())
}


@@ -18,16 +18,20 @@ description = "A newly created nostr-rs-relay.\n\nCustomize this with your own i
[diagnostics]
# Enable tokio tracing (for use with tokio-console)
#tracing = true
#tracing = false
[database]
# Database engine (sqlite/postgres). Defaults to sqlite.
# Support for postgres is currently experimental.
#engine = "sqlite"
# Directory for SQLite files. Defaults to the current directory. Can
# also be specified (and overridden) with the "--db dirname" command
# line option.
data_directory = "."
#data_directory = "."
# Use an in-memory database instead of 'nostr.db'.
# Requires sqlite engine.
# Caution; this will not survive a process restart!
#in_memory = false
@@ -36,8 +40,23 @@ data_directory = "."
# Minimum number of SQLite reader connections
#min_conn = 4
# Maximum number of SQLite reader connections
#max_conn = 128
# Maximum number of SQLite reader connections. Recommend setting this
# to approx the number of cores.
#max_conn = 8
# Database connection string. Required for postgres; not used for
# sqlite.
#connection = "postgresql://postgres:nostr@localhost:7500/nostr"
[grpc]
# gRPC interfaces for externalized decisions and other extensions to
# functionality.
#
# Events can be authorized through an external service, by providing
# the URL below. In the event the server is not accessible, events
# will be permitted. The protobuf3 schema used is available in
# `proto/nauthz.proto`.
# event_admission_server = "http://[::1]:50051"
[network]
# Bind to this network address
@@ -62,8 +81,29 @@ reject_future_seconds = 1800
[limits]
# Limit events created per second, averaged over one minute. Must be
# an integer. If not set (or set to 0), defaults to unlimited.
#messages_per_sec = 0
# an integer. If not set (or set to 0), there is no limit. Note:
# this is for the server as a whole, not per-connection.
#
# Limiting event creation is highly recommended if your relay is
# public!
#
#messages_per_sec = 5
# Limit client subscriptions created, averaged over one minute. Must
# be an integer. If not set (or set to 0), defaults to unlimited.
# Strongly recommended to set this to a low value such as 10 to ensure
# fair service.
#subscriptions_per_min = 0
# UNIMPLEMENTED...
# Limit how many concurrent database connections a client can have.
# This prevents a single client from starting too many expensive
# database queries. Must be an integer. If not set (or set to 0),
# defaults to unlimited (subject to subscription limits).
#db_conns_per_client = 0
# Limit blocking threads used for database connections. Defaults to 16.
#max_blocking_threads = 16
# Limit the maximum size of an EVENT message. Defaults to 128 KB.
# Set to 0 for unlimited.
@@ -83,6 +123,11 @@ reject_future_seconds = 1800
# backpressure to senders if writes are slow.
#event_persist_buffer = 4096
# Event kind blacklist. Events with these kinds will be discarded.
#event_kind_blacklist = [
# 70202,
#]
[authorization]
# Pubkey addresses in this array are whitelisted for event publishing.
# Only valid events by these authors will be accepted, if the variable

docs/database-maintenance.md Normal file

@@ -0,0 +1,122 @@
# Database Maintenance
`nostr-rs-relay` uses the SQLite embedded database to minimize
dependencies and overall footprint of running a relay. If traffic is
light, the relay should just run with very little need for
intervention. For heavily trafficked relays, there are a number of
steps that the operator may need to take to maintain performance and
limit disk usage.
This maintenance guide is current as of version `0.8.2`. Future
versions may incorporate and automate some of these steps.
## Backing Up the Database
To prevent data loss, the database should be backed up regularly. The
recommended method is to use the `sqlite3` command to perform an
"Online Backup". This can be done while the relay is running, queries
can still run and events will be persisted during the backup.
The following commands will perform a backup of the database to a
dated file, and then compress it to minimize size:
```console
BACKUP_FILE=/var/backups/nostr/`date +%Y%m%d_%H%M`.db
sqlite3 -readonly /apps/nostr-relay/nostr.db ".backup $BACKUP_FILE"
sqlite3 $BACKUP_FILE "vacuum;"
bzip2 -9 $BACKUP_FILE
```
Nostr events are very compressible. Expect a compression ratio on the
order of 4:1, resulting in a 75% space saving.
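The same online backup can also be driven from Rust via `rusqlite`'s
backup API. This is only an illustrative sketch, not relay code, and
it assumes a `rusqlite` build with the `backup` feature enabled (the
relay's own `Cargo.toml`, shown above, does not enable it):
```rust
use std::time::Duration;
use rusqlite::backup::Backup;
use rusqlite::Connection;

// Sketch of an online backup: copy the live database into a new file
// in batches, pausing between batches so the relay's readers and
// writer can keep making progress.
fn online_backup(src_path: &str, dst_path: &str) -> rusqlite::Result<()> {
    let src = Connection::open(src_path)?;
    let mut dst = Connection::open(dst_path)?;
    let backup = Backup::new(&src, &mut dst)?;
    backup.run_to_completion(100, Duration::from_millis(250), None)
}
```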
## Vacuuming the Database
As the database is updated, it can become fragmented. Performing a
full `vacuum` will rebuild the entire database file, and may reduce
its size, especially if a large amount of data was recently updated
or deleted.
```console
vacuum;
```
## Clearing Hidden Events
When an event is deleted, it is not actually removed from the
database. Instead, a `HIDDEN` flag is set to true for the event,
which excludes it from search results. In the current version of the
relay, high-volume replacements such as profile or other replaceable
events are deleted outright rather than hidden.
Removing hidden events should therefore not result in significant
space savings, but it can still be done if there is no desire to hold
on to events that can never be re-broadcast.
```console
PRAGMA foreign_keys = ON;
delete from event where HIDDEN=true;
```
## Manually Removing Events
For a variety of reasons, an operator may wish to remove some events
from the database. The only way of achieving this today is with
manually run SQL commands.
It is recommended to have a good backup prior to manually running SQL
commands!
In all cases, it is mandatory to enable foreign keys, and this must be
done for every connection. Otherwise, you will likely orphan rows in
the `tag` table.
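As a sketch of what "every connection" means in practice, a
maintenance tool might open the database like this, issuing the
pragma immediately after connecting (an illustration with a
hypothetical function name, not relay code):
```rust
use rusqlite::Connection;

// Open a connection for manual maintenance. The pragma must be issued
// on each new connection, or deletes may orphan rows in `tag`.
fn open_for_maintenance(path: &str) -> rusqlite::Result<Connection> {
    let conn = Connection::open(path)?;
    conn.execute_batch("PRAGMA foreign_keys = ON;")?;
    Ok(conn)
}
```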
### Deleting Specific Event
```console
PRAGMA foreign_keys = ON;
delete from event where event_hash=x'00000000000c1271675dc86e3e1dd1336827bccabb90dc4c9d3b4465efefe00e';
```
### Deleting All Events for Pubkey
```console
PRAGMA foreign_keys = ON;
delete from event where author=x'000000000002c7831d9c5a99f183afc2813a6f69a16edda7f6fc0ed8110566e6';
```
### Deleting All Events of a Kind
```console
PRAGMA foreign_keys = ON;
delete from event where kind=70202;
```
### Deleting Old Events
In this scenario, we wish to delete any event that has been stored by
our relay for more than 1 month. Crucially, this is based on when the
event was stored, not when the event says it was created. If an event
has a `created_at` field from two years ago, but was first sent to our
relay yesterday, it would not be deleted in this scenario. Keep in
mind that we do not track anything for re-broadcast events that we
already have, so this is not a very effective way of implementing a
"least recently seen" policy.
```console
PRAGMA foreign_keys = ON;
TODO!
```
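The docs leave this query as a TODO. As an illustrative sketch only,
assuming the `first_seen` column (populated in the bulk loader's
INSERT statement later in this diff) records when the relay stored
the event, a one-month purge might look like:
```rust
use rusqlite::Connection;

// Hypothetical purge, not from the official docs: delete events the
// relay first stored more than 30 days ago.
fn purge_events_older_than_one_month(conn: &Connection) -> rusqlite::Result<usize> {
    conn.execute_batch("PRAGMA foreign_keys = ON;")?;
    conn.execute(
        "DELETE FROM event WHERE first_seen < strftime('%s','now') - 30*24*60*60;",
        [],
    )
}
```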
### Delete Profile Events with No Recent Events
Many users create profiles, post a "hello world" event, and then never
appear again (likely using an ephemeral keypair that was lost in the
browser cache). We can find these accounts and remove them after some
time.
```console
PRAGMA foreign_keys = ON;
TODO!
```
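This is also a TODO in the docs. One hedged sketch, using only
columns that appear elsewhere in this diff (`kind`, `author`,
`created_at`), would remove kind-0 profile events whose author has
produced nothing in the last 30 days:
```rust
use rusqlite::Connection;

// Hypothetical query, not from the official docs.
fn purge_stale_profiles(conn: &Connection) -> rusqlite::Result<usize> {
    conn.execute_batch("PRAGMA foreign_keys = ON;")?;
    conn.execute(
        "DELETE FROM event WHERE kind = 0 AND author NOT IN
           (SELECT DISTINCT author FROM event
            WHERE created_at > strftime('%s','now') - 30*24*60*60);",
        [],
    )
}
```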

79 docs/grpc-extensions.md Normal file

@@ -0,0 +1,79 @@
# gRPC Extensions Design Document
The relay will be extensible through gRPC endpoints, definable in the
main configuration file. These will allow external programs to host
logic for deciding things such as whether an event should be
persisted, whether a connection should be allowed, and whether a
subscription request should be registered. The primary goal is to
allow for relay-operator-specific functionality that helps operators
serve smaller communities and reduce spam and abuse.
This will likely evolve substantially; the first goal is to get a
basic one-way service that lets an externalized program decide on
event persistence. This does not represent the final state of gRPC
extensibility in `nostr-rs-relay`.
## Considerations
Write event latency must not be significantly affected. However, the
primary reason we are implementing this is spam/abuse protection, so
we are willing to tolerate some increase in latency if that protects
us against outages!
The interface should provide enough information to make simple
decisions, without burdening the relay with extra queries. The
decision endpoint will be mostly responsible for maintaining state and
gathering additional details.
## Design Overview
A gRPC server may be defined in the `config.toml` file. If it exists,
the relay will attempt to connect to it and send a message for each
`EVENT` command submitted by clients. If a successful response is
returned indicating the event is permitted, the relay continues
processing the event as normal. All existing whitelist, blacklist,
and `NIP-05` validation checks are still performed and MAY still
result in the event being rejected. If a successful response is
returned indicating the decision is anything other than permit, then
the relay MUST reject the event, and return a command result to the
user (using `NIP-20`) indicating the event was blocked (optionally
providing a message).
In the event there is an error in the gRPC interface, event processing
proceeds as if gRPC was disabled (fail open). This allows gRPC
servers to be deployed with minimal chance of causing a full relay
outage.
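As a minimal sketch of that fail-open behavior (not the relay's
actual code), a caller using the tonic-generated client for
`proto/nauthz.proto` might look like this; the endpoint address
matches the commented example in `config.toml`:
```rust
use nauthz_grpc::authorization_client::AuthorizationClient;
use nauthz_grpc::{Decision, EventRequest};

pub mod nauthz_grpc {
    tonic::include_proto!("nauthz");
}

// Returns true if the event may proceed to normal processing.
async fn admit_event(req: EventRequest) -> bool {
    match AuthorizationClient::connect("http://[::1]:50051").await {
        Ok(mut client) => match client.event_admit(req).await {
            // Only an explicit permit decision lets the event through.
            Ok(reply) => reply.into_inner().decision == Decision::Permit as i32,
            Err(_) => true, // RPC error: fail open
        },
        Err(_) => true, // server unreachable: fail open
    }
}
```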
## Design Details
Currently one procedure call is supported, `EventAdmit`, in the
`Authorization` service. It accepts the following data in order to
support authorization decisions:
- The event itself
- The client IP that submitted the event
- The client's HTTP origin header, if one exists
- The client's HTTP user agent header, if one exists
- The public key of the client, if `NIP-42` authentication was
performed (not supported in the relay yet!)
- The `NIP-05` associated with the event's public key, if it is known
to the relay
A server providing authorization decisions will return the following:
- A decision to permit or deny the event
- An optional message that explains why the event was denied, to be
transmitted to the client
## Security Issues
There is little attempt to secure this interface, since it is intended
for use by processes running on the same host. It is recommended to
ensure that the gRPC server providing the API is not exposed to the
public Internet. Authorization server implementations should have
their own security reviews performed.
A slow gRPC server could cause availability issues for event
processing, since this is performed on a single thread. Avoid any
expensive or long-running processes that could result from submitted
events, since any client can initiate a gRPC call to the service.
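One possible mitigation, sketched here as an assumption rather than
anything the relay implements, is to bound the admission call with a
deadline and fail open when it expires:
```rust
use std::future::Future;
use std::time::Duration;
use tokio::time::timeout;

// Wrap any admission future (such as `admit_event` above) in a
// deadline; a timeout is treated as a permit, consistent with the
// fail-open design.
async fn admit_with_deadline<F: Future<Output = bool>>(admit: F) -> bool {
    timeout(Duration::from_millis(500), admit).await.unwrap_or(true)
}
```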


@@ -1,3 +0,0 @@
#!/usr/bin/env bash
sed -E 's/@sha256:[[:alnum:]]+//g' Dockerfile > Dockerfile.any-platform
echo "Created platform-agnostic Dockerfile in 'Dockerfile.any-platform'"

1010 nauthz_server_example/Cargo.lock generated Normal file
File diff suppressed because it is too large.

nauthz_server_example/Cargo.toml Normal file

@@ -0,0 +1,13 @@
[package]
name = "nauthz-server"
version = "0.1.0"
edition = "2021"
[dependencies]
# Common dependencies
tokio = { version = "1.0", features = ["rt-multi-thread", "macros"] }
prost = "0.11"
tonic = "0.8.3"
[build-dependencies]
tonic-build = { version="0.8.3", features = ["prost"] }

nauthz_server_example/build.rs Normal file

@@ -0,0 +1,4 @@
fn main() -> Result<(), Box<dyn std::error::Error>> {
tonic_build::compile_protos("../proto/nauthz.proto")?;
Ok(())
}

nauthz_server_example/src/main.rs Normal file

@@ -0,0 +1,61 @@
use tonic::{transport::Server, Request, Response, Status};
use nauthz_grpc::authorization_server::{Authorization, AuthorizationServer};
use nauthz_grpc::{EventReply, EventRequest, Decision};
pub mod nauthz_grpc {
tonic::include_proto!("nauthz");
}
#[derive(Default)]
pub struct EventAuthz {
allowed_kinds: Vec<u64>,
}
#[tonic::async_trait]
impl Authorization for EventAuthz {
async fn event_admit(
&self,
request: Request<EventRequest>,
) -> Result<Response<EventReply>, Status> {
let reply;
let req = request.into_inner();
let event = req.event.unwrap();
let content_prefix:String = event.content.chars().take(40).collect();
println!("recvd event, [kind={}, origin={:?}, nip05_domain={:?}, tag_count={}, content_sample={:?}]",
event.kind, req.origin, req.nip05.map(|x| x.domain), event.tags.len(), content_prefix);
// Permit any event with a whitelisted kind
if self.allowed_kinds.contains(&event.kind) {
println!("This looks fine! (kind={})",event.kind);
reply = nauthz_grpc::EventReply {
decision: Decision::Permit as i32,
message: None
};
} else {
println!("Blocked! (kind={})",event.kind);
reply = nauthz_grpc::EventReply {
decision: Decision::Deny as i32,
message: Some(format!("kind {} not permitted", event.kind)),
};
}
Ok(Response::new(reply))
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let addr = "[::1]:50051".parse().unwrap();
// A simple authorization engine that allows kinds 0-3
let checker = EventAuthz {
allowed_kinds: vec![0,1,2,3],
};
println!("EventAuthz Server listening on {}", addr);
// Start serving
Server::builder()
.add_service(AuthorizationServer::new(checker))
.serve(addr)
.await?;
Ok(())
}
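With this example server running, the relay can be pointed at it by setting `event_admission_server = "http://[::1]:50051"` in the `[grpc]` section of `config.toml` (shown above): events of kinds 0 through 3 will be permitted, and all other kinds rejected with a NIP-20 command result.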

60 proto/nauthz.proto Normal file

@@ -0,0 +1,60 @@
syntax = "proto3";
// Nostr Authorization Services
package nauthz;
// Authorization for actions against a relay
service Authorization {
// Determine if an event should be admitted to the relay
rpc EventAdmit(EventRequest) returns (EventReply) {}
}
message Event {
bytes id = 1; // 32-byte SHA256 hash of serialized event
bytes pubkey = 2; // 32-byte public key of event creator
fixed64 created_at = 3; // UNIX timestamp provided by event creator
uint64 kind = 4; // event kind
string content = 5; // arbitrary event contents
repeated TagEntry tags = 6; // event tag array
bytes sig = 7; // 32-byte signature of the event id
// Individual values for a single tag
message TagEntry {
repeated string values = 1;
}
}
// Event data and metadata for authorization decisions
message EventRequest {
Event event =
1; // the event to be admitted for further relay processing
optional string ip_addr =
2; // IP address of the client that submitted the event
optional string origin =
3; // HTTP origin header from the client, if one exists
optional string user_agent =
4; // HTTP user-agent header from the client, if one exists
optional bytes auth_pubkey =
5; // the public key associated with a NIP-42 AUTH'd session, if
// authentication occurred
optional Nip05Name nip05 =
6; // NIP-05 address associated with the event pubkey, if it is
// known and has been validated by the relay
// A NIP_05 verification record
message Nip05Name {
string local = 1;
string domain = 2;
}
}
// A permit or deny decision
enum Decision {
DECISION_UNSPECIFIED = 0;
DECISION_PERMIT = 1; // Admit this event for further processing
DECISION_DENY = 2; // Deny persisting or propagating this event
}
// Response to an event authorization request
message EventReply {
Decision decision = 1; // decision to enforce
optional string message = 2; // informative message for the client
}


@@ -68,13 +68,32 @@ http {
server_name relay.example.com;
ssl_certificate /etc/letsencrypt/live/relay.example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/relay.example.com/privkey.pem;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_protocols TLSv1.3 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ecdh_curve secp521r1:secp384r1;
ssl_ciphers EECDH+AESGCM:EECDH+AES256;
# Optional Diffie-Hellman parameters
# Generate with openssl dhparam -out /etc/ssl/certs/dhparam.pem 4096
#ssl_dhparam /etc/ssl/certs/dhparam.pem;
ssl_session_cache shared:TLS:2m;
ssl_buffer_size 4k;
# OCSP stapling
ssl_stapling on;
ssl_stapling_verify on;
resolver 1.1.1.1 1.0.0.1 [2606:4700:4700::1111] [2606:4700:4700::1001]; # Cloudflare
# Set HSTS to 365 days
add_header Strict-Transport-Security 'max-age=31536000; includeSubDomains; preload' always;
keepalive_timeout 70;
location / {
proxy_pass http://localhost:8080;
proxy_http_version 1.1;
proxy_read_timeout 1d;
proxy_send_timeout 1d;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Host $host;


@@ -1 +1,4 @@
edition = "2021"
#max_width = 140
#chain_width = 100
#fn_call_width = 100

175 src/bin/bulkloader.rs Normal file

@@ -0,0 +1,175 @@
use std::io;
use std::path::Path;
use nostr_rs_relay::utils::is_lower_hex;
use tracing::info;
use nostr_rs_relay::config;
use nostr_rs_relay::event::{Event,single_char_tagname};
use nostr_rs_relay::error::{Error, Result};
use nostr_rs_relay::repo::sqlite::{PooledConnection, build_pool};
use nostr_rs_relay::repo::sqlite_migration::{curr_db_version, DB_VERSION};
use rusqlite::{OpenFlags, Transaction};
use std::sync::mpsc;
use std::thread;
use rusqlite::params;
/// Bulk load JSONL data from STDIN to the database specified in config.toml (or ./nostr.db as a default).
/// The database must already exist, this will not create a new one.
/// Tested against schema v13.
pub fn main() -> Result<()> {
let _trace_sub = tracing_subscriber::fmt::try_init();
println!("Nostr-rs-relay Bulk Loader");
// check for a database file, or create one.
let settings = config::Settings::new(&None);
if !Path::new(&settings.database.data_directory).is_dir() {
info!("Database directory does not exist");
return Err(Error::DatabaseDirError);
}
// Get a database pool
let pool = build_pool("bulk-loader", &settings, OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE, 1,4,false);
{
// check for database schema version
let mut conn: PooledConnection = pool.get()?;
let version = curr_db_version(&mut conn)?;
info!("current version is: {:?}", version);
// ensure the schema version is current.
if version != DB_VERSION {
info!("version is not current, exiting");
panic!("cannot write to schema other than v{DB_VERSION}");
}
}
// this channel will contain parsed events ready to be inserted
let (event_tx, event_rx) = mpsc::sync_channel(100_000);
// Thread for reading events
let _stdin_reader_handler = thread::spawn(move || {
let stdin = io::stdin();
for readline in stdin.lines() {
if let Ok(line) = readline {
// try to parse a nostr event
let eres: Result<Event, serde_json::Error> = serde_json::from_str(&line);
if let Ok(mut e) = eres {
if let Ok(()) = e.validate() {
e.build_index();
//debug!("Event: {:?}", e);
event_tx.send(Some(e)).ok();
} else {
info!("could not validate event");
}
} else {
info!("error reading event: {:?}", eres);
}
} else {
// error reading
info!("error reading: {:?}", readline);
}
}
info!("finished parsing events");
event_tx.send(None).ok();
let ok: Result<()> = Ok(());
ok
});
let mut conn: PooledConnection = pool.get()?;
let mut events_read = 0;
let event_batch_size = 50_000;
let mut new_events = 0;
let mut has_more_events = true;
while has_more_events {
// begin a transaction
let tx = conn.transaction()?;
// read in batch_size events and commit
for _ in 0..event_batch_size {
match event_rx.recv() {
Ok(Some(e)) => {
events_read += 1;
// ignore ephemeral events
if !(e.kind >= 20000 && e.kind < 30000) {
match write_event(&tx, e) {
Ok(c) => {
new_events += c;
},
Err(e) => {
info!("error inserting event: {:?}", e);
}
}
}
},
Ok(None) => {
// signal that the sender will never produce more
// events
has_more_events=false;
break;
},
Err(_) => {
info!("sender is closed");
// sender is done
}
}
}
info!("committed {} events...", new_events);
tx.commit()?;
conn.execute_batch("pragma wal_checkpoint(truncate)")?;
}
info!("processed {} events", events_read);
info!("stored {} new events", new_events);
// get a connection for writing events
// read standard in.
info!("finished reading input");
Ok(())
}
/// Write an event and update the tag table.
/// Assumes the event has its index built.
fn write_event(tx: &Transaction, e: Event) -> Result<usize> {
let id_blob = hex::decode(&e.id).ok();
let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok();
let delegator_blob: Option<Vec<u8>> = e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
let event_str = serde_json::to_string(&e).ok();
// ignore if the event hash is a duplicate.
let ins_count = tx.execute(
"INSERT OR IGNORE INTO event (event_hash, created_at, kind, author, delegated_by, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, ?6, strftime('%s','now'), FALSE);",
params![id_blob, e.created_at, e.kind, pubkey_blob, delegator_blob, event_str]
)?;
if ins_count == 0 {
return Ok(0);
}
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
let event_id = tx.last_insert_rowid();
// look at each event, and each tag, creating new tag entries if appropriate.
for t in e.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.get(0).unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
}
// safe because len was > 1
let tagval = t.get(1).unwrap();
// insert as BLOB if we can restore it losslessly.
// this means it needs to be even length and lowercase.
if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
tx.execute(
"INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
params![event_id, tagname, hex::decode(tagval).ok()],
)?;
} else {
// otherwise, insert as text
tx.execute(
"INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
params![event_id, tagname, &tagval],
)?;
}
}
if e.is_replaceable() {
//let query = "SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1;";
//let count: usize = tx.query_row(query, params![e.kind, pubkey_blob], |row| row.get(0))?;
//info!("found {} rows that /would/ be preserved", count);
match tx.execute(
"DELETE FROM event WHERE kind=? and author=? and id NOT IN (SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1);",
params![e.kind, pubkey_blob, e.kind, pubkey_blob],
) {
Ok(_) => {},
Err(x) => {info!("error deleting replaceable event: {:?}",x);}
}
}
Ok(ins_count)
}
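A minimal sketch of the BLOB-vs-text decision above (the predicate is an illustrative stand-in for the is_lower_hex helper in utils):

fn stored_as_hex_blob(tagval: &str) -> bool {
    // even-length lowercase hex round-trips through hex::decode/hex::encode,
    // so it can be stored as a BLOB and restored losslessly
    tagval.len() % 2 == 0
        && tagval.chars().all(|c| c.is_ascii_digit() || ('a'..='f').contains(&c))
}

// "1a2b" goes to value_hex (BLOB); "1A2B" or "hello" goes to value (TEXT).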

src/cli.rs (new file, +20 lines)

@@ -0,0 +1,20 @@
use clap::Parser;
#[derive(Parser)]
#[command(about = "A nostr relay written in Rust", author = env!("CARGO_PKG_AUTHORS"), version = env!("CARGO_PKG_VERSION"))]
pub struct CLIArgs {
#[arg(
short,
long,
help = "Use the <directory> as the location of the database",
required = false,
)]
pub db: Option<String>,
#[arg(
short,
long,
help = "Use the <file name> as the location of the config file",
required = false,
)]
pub config: Option<String>,
}
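A hedged sketch of how this struct is consumed (the function name is invented; the body mirrors the src/main.rs changes further down in this diff):

fn load_settings() -> config::Settings {
    let args = CLIArgs::parse();
    // e.g. invoked as: nostr-rs-relay --db /var/lib/nostr --config /etc/nostr/config.toml
    let mut settings = config::Settings::new(&args.config);
    if let Some(db_dir) = args.db {
        settings.database.data_directory = db_dir;
    }
    settings
}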


@@ -18,9 +18,17 @@ pub struct Info {
#[allow(unused)]
pub struct Database {
pub data_directory: String,
pub engine: String,
pub in_memory: bool,
pub min_conn: u32,
pub max_conn: u32,
pub connection: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Grpc {
pub event_admission_server: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -52,11 +60,15 @@ pub struct Retention {
#[allow(unused)]
pub struct Limits {
pub messages_per_sec: Option<u32>, // Artificially slow down event writing to limit disk consumption (averaged over 1 minute)
pub subscriptions_per_min: Option<u32>, // Artificially slow down request (db query) creation to prevent abuse (averaged over 1 minute)
pub db_conns_per_client: Option<u32>, // How many concurrent database queries (not subscriptions) may a client have?
pub max_blocking_threads: usize,
pub max_event_bytes: Option<usize>, // Maximum size of an EVENT message
pub max_ws_message_bytes: Option<usize>,
pub max_ws_frame_bytes: Option<usize>,
pub broadcast_buffer: usize, // events to buffer for subscribers (prevents slow readers from consuming memory)
pub event_persist_buffer: usize, // events to buffer for database commits (block senders if database writes are too slow)
pub event_kind_blacklist: Option<Vec<u64>>
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -139,6 +151,7 @@ pub struct Settings {
pub info: Info,
pub diagnostics: Diagnostics,
pub database: Database,
pub grpc: Grpc,
pub network: Network,
pub limits: Limits,
pub authorization: Authorization,
@@ -149,10 +162,10 @@ pub struct Settings {
impl Settings {
#[must_use]
pub fn new() -> Self {
pub fn new(config_file_name: &Option<String>) -> Self {
let default_settings = Self::default();
// attempt to construct settings with file
let from_file = Self::new_from_default(&default_settings);
let from_file = Self::new_from_default(&default_settings, config_file_name);
match from_file {
Ok(f) => f,
Err(e) => {
@@ -162,13 +175,19 @@ impl Settings {
}
}
fn new_from_default(default: &Settings) -> Result<Self, ConfigError> {
fn new_from_default(default: &Settings, config_file_name: &Option<String>) -> Result<Self, ConfigError> {
let default_config_file_name = "config.toml".to_string();
let config: &String = match config_file_name {
Some(value) => value,
None => &default_config_file_name
};
let builder = Config::builder();
let config: Config = builder
// use defaults
// use defaults
.add_source(Config::try_from(default)?)
// override with file contents
.add_source(File::with_name("config.toml"))
// override with file contents
.add_source(File::with_name(config))
.build()?;
let mut settings: Settings = config.try_deserialize()?;
// ensure connection pool size is logical
@@ -202,9 +221,14 @@ impl Default for Settings {
diagnostics: Diagnostics { tracing: false },
database: Database {
data_directory: ".".to_owned(),
engine: "sqlite".to_owned(),
in_memory: false,
min_conn: 4,
max_conn: 128,
max_conn: 8,
connection: "".to_owned(),
},
grpc: Grpc {
event_admission_server: None,
},
network: Network {
port: 8080,
@@ -214,11 +238,15 @@ impl Default for Settings {
},
limits: Limits {
messages_per_sec: None,
subscriptions_per_min: None,
db_conns_per_client: None,
max_blocking_threads: 16,
max_event_bytes: Some(2 << 17), // 256K
max_ws_message_bytes: Some(2 << 17), // 256K
max_ws_frame_bytes: Some(2 << 17), // 256K
broadcast_buffer: 16384,
event_persist_buffer: 4096,
event_kind_blacklist: None,
},
authorization: Authorization {
pubkey_whitelist: None, // Allow any address to publish
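For reference, a sketch of how these new settings might look in config.toml (section and key names mirror the struct fields; the address and pubkey are placeholders):

[database]
engine = "sqlite"    # or "postgres"
connection = ""      # postgres connection string, used when engine = "postgres"

[grpc]
event_admission_server = "http://[::1]:50051"

[authorization]
pubkey_whitelist = ["<64-char hex pubkey>"]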


@@ -5,7 +5,7 @@ use crate::error::Result;
use crate::subscription::Subscription;
use std::collections::HashMap;
use tracing::{debug, info};
use tracing::{debug, trace};
use uuid::Uuid;
/// A subscription identifier has a maximum length
@@ -14,7 +14,7 @@ const MAX_SUBSCRIPTION_ID_LEN: usize = 256;
/// State for a client connection
pub struct ClientConn {
/// Client IP (either from socket, or configured proxy header)
client_ip: String,
client_ip_addr: String,
/// Unique client identifier generated at connection time
client_id: Uuid,
/// The current set of active client subscriptions
@@ -32,20 +32,25 @@ impl Default for ClientConn {
impl ClientConn {
/// Create a new, empty connection state.
#[must_use]
pub fn new(client_ip: String) -> Self {
pub fn new(client_ip_addr: String) -> Self {
let client_id = Uuid::new_v4();
ClientConn {
client_ip,
client_ip_addr,
client_id,
subscriptions: HashMap::new(),
max_subs: 32,
}
}
pub fn subscriptions(&self) -> &HashMap<String, Subscription> {
#[must_use] pub fn subscriptions(&self) -> &HashMap<String, Subscription> {
&self.subscriptions
}
/// Check if the given subscription already exists
#[must_use] pub fn has_subscription(&self, sub: &Subscription) -> bool {
self.subscriptions.values().any(|x| x == sub)
}
/// Get a short prefix of the client's unique identifier, suitable
/// for logging.
#[must_use]
@@ -55,7 +60,7 @@ impl ClientConn {
#[must_use]
pub fn ip(&self) -> &str {
&self.client_ip
&self.client_ip_addr
}
/// Add a new subscription for this connection.
@@ -69,7 +74,7 @@ impl ClientConn {
// prevent arbitrarily long subscription identifiers from
// being used.
if sub_id_len > MAX_SUBSCRIPTION_ID_LEN {
info!(
debug!(
"ignoring sub request with excessive length: ({})",
sub_id_len
);
@@ -79,7 +84,7 @@ impl ClientConn {
if self.subscriptions.contains_key(&k) {
self.subscriptions.remove(&k);
self.subscriptions.insert(k, s.clone());
debug!(
trace!(
"replaced existing subscription (cid: {}, sub: {:?})",
self.get_client_prefix(),
s.get_id()
@@ -93,7 +98,7 @@ impl ClientConn {
}
// add subscription
self.subscriptions.insert(k, s);
debug!(
trace!(
"registered new subscription, currently have {} active subs (cid: {})",
self.subscriptions.len(),
self.get_client_prefix(),
@@ -105,7 +110,7 @@ impl ClientConn {
pub fn unsubscribe(&mut self, c: &Close) {
// TODO: return notice if subscription did not exist.
self.subscriptions.remove(&c.id);
debug!(
trace!(
"removed subscription, currently have {} active subs (cid: {})",
self.subscriptions.len(),
self.get_client_prefix(),

src/db.rs (891 lines changed)

@@ -1,30 +1,23 @@
//! Event persistence and querying
//use crate::config::SETTINGS;
use crate::config::Settings;
use crate::error::{Error, Result};
use crate::event::{single_char_tagname, Event};
use crate::hexrange::hex_range;
use crate::hexrange::HexSearch;
use crate::nip05;
use crate::event::Event;
use crate::notice::Notice;
use crate::schema::{upgrade_db, STARTUP_SQL};
use crate::subscription::ReqFilter;
use crate::subscription::Subscription;
use crate::utils::{is_hex, is_lower_hex};
use crate::server::NostrMetrics;
use crate::nauthz;
use governor::clock::Clock;
use governor::{Quota, RateLimiter};
use hex;
use r2d2;
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::params;
use rusqlite::types::ToSql;
use rusqlite::OpenFlags;
use std::fmt::Write as _;
use std::path::Path;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use std::time::Instant;
use tokio::task;
use sqlx::pool::PoolOptions;
use sqlx::postgres::PgConnectOptions;
use sqlx::ConnectOptions;
use crate::repo::sqlite::SqliteRepo;
use crate::repo::postgres::{PostgresRepo,PostgresPool};
use crate::repo::NostrRepo;
use std::time::{Instant, Duration};
use tracing::log::LevelFilter;
use tracing::{debug, info, trace, warn};
pub type SqlitePool = r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>;
@@ -34,369 +27,305 @@ pub type PooledConnection = r2d2::PooledConnection<r2d2_sqlite::SqliteConnection
pub struct SubmittedEvent {
pub event: Event,
pub notice_tx: tokio::sync::mpsc::Sender<Notice>,
pub source_ip: String,
pub origin: Option<String>,
pub user_agent: Option<String>,
}
/// Database file
pub const DB_FILE: &str = "nostr.db";
/// Build a database connection pool.
/// Build repo
/// # Panics
///
/// Will panic if the pool could not be created.
#[must_use]
pub fn build_pool(
name: &str,
settings: &Settings,
flags: OpenFlags,
min_size: u32,
max_size: u32,
wait_for_db: bool,
) -> SqlitePool {
let db_dir = &settings.database.data_directory;
let full_path = Path::new(db_dir).join(DB_FILE);
// small hack; if the database doesn't exist yet, that means the
// writer thread hasn't finished. Give it a chance to work. This
// is only an issue with the first time we run.
if !settings.database.in_memory {
while !full_path.exists() && wait_for_db {
debug!("Database reader pool is waiting on the database to be created...");
thread::sleep(Duration::from_millis(500));
}
pub async fn build_repo(settings: &Settings, metrics: NostrMetrics) -> Arc<dyn NostrRepo> {
match settings.database.engine.as_str() {
"sqlite" => {Arc::new(build_sqlite_pool(settings, metrics).await)},
"postgres" => {Arc::new(build_postgres_pool(settings, metrics).await)},
_ => panic!("Unknown database engine"),
}
let manager = if settings.database.in_memory {
SqliteConnectionManager::memory()
.with_flags(flags)
.with_init(|c| c.execute_batch(STARTUP_SQL))
} else {
SqliteConnectionManager::file(&full_path)
.with_flags(flags)
.with_init(|c| c.execute_batch(STARTUP_SQL))
};
let pool: SqlitePool = r2d2::Pool::builder()
.test_on_check_out(true) // no noticeable performance hit
.min_idle(Some(min_size))
.max_size(max_size)
.build(manager)
.unwrap();
info!(
"Built a connection pool {:?} (min={}, max={})",
name, min_size, max_size
);
pool
}
/// Spawn a database writer that persists events to the SQLite store.
async fn build_sqlite_pool(settings: &Settings, metrics: NostrMetrics) -> SqliteRepo {
let repo = SqliteRepo::new(settings, metrics);
repo.start().await.ok();
repo.migrate_up().await.ok();
repo
}
async fn build_postgres_pool(settings: &Settings, metrics: NostrMetrics) -> PostgresRepo {
let mut options: PgConnectOptions = settings.database.connection.as_str().parse().unwrap();
options.log_statements(LevelFilter::Debug);
options.log_slow_statements(LevelFilter::Warn, Duration::from_secs(60));
let pool: PostgresPool = PoolOptions::new()
.max_connections(settings.database.max_conn)
.min_connections(settings.database.min_conn)
.idle_timeout(Duration::from_secs(60))
.connect_with(options)
.await
.unwrap();
let repo = PostgresRepo::new(pool, metrics);
// Panic on migration failure
let version = repo.migrate_up().await.unwrap();
info!("Postgres migration completed, at v{}", version);
repo
}
/// Spawn a database writer that persists events to the `SQLite` store.
pub async fn db_writer(
repo: Arc<dyn NostrRepo>,
settings: Settings,
mut event_rx: tokio::sync::mpsc::Receiver<SubmittedEvent>,
bcast_tx: tokio::sync::broadcast::Sender<Event>,
metadata_tx: tokio::sync::broadcast::Sender<Event>,
mut shutdown: tokio::sync::broadcast::Receiver<()>,
) -> tokio::task::JoinHandle<Result<()>> {
) -> Result<()> {
// are we performing NIP-05 checking?
let nip05_active = settings.verified_users.is_active();
// are we requiring NIP-05 user verification?
let nip05_enabled = settings.verified_users.is_enabled();
task::spawn_blocking(move || {
let db_dir = &settings.database.data_directory;
let full_path = Path::new(db_dir).join(DB_FILE);
// create a connection pool
let pool = build_pool(
"event writer",
&settings,
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
1,
4,
false,
);
if settings.database.in_memory {
info!("using in-memory database, this will not persist a restart!");
//upgrade_db(&mut pool.get()?)?;
// Make a copy of the whitelist
let whitelist = &settings.authorization.pubkey_whitelist.clone();
// get rate limit settings
let rps_setting = settings.limits.messages_per_sec;
let mut most_recent_rate_limit = Instant::now();
let mut lim_opt = None;
let clock = governor::clock::QuantaClock::default();
if let Some(rps) = rps_setting {
if rps > 0 {
info!("Enabling rate limits for event creation ({}/sec)", rps);
let quota = core::num::NonZeroU32::new(rps * 60).unwrap();
lim_opt = Some(RateLimiter::direct(Quota::per_minute(quota)));
}
}
// create a client if GRPC is enabled.
// Check with externalized event admitter service, if one is defined.
let mut grpc_client = if let Some(svr) = settings.grpc.event_admission_server {
Some(nauthz::EventAuthzService::connect(&svr).await)
} else {
None
};
//let gprc_client = settings.grpc.event_admission_server.map(|s| {
// event_admitter_connect(&s);
// });
loop {
if shutdown.try_recv().is_ok() {
info!("shutting down database writer");
break;
}
// call blocking read on channel
let next_event = event_rx.recv().await;
// if the channel has closed, we will never get work
if next_event.is_none() {
break;
}
// track if an event write occurred; this is used to
// update the rate limiter
let mut event_write = false;
let subm_event = next_event.unwrap();
let event = subm_event.event;
let notice_tx = subm_event.notice_tx;
// check if this event is authorized.
if let Some(allowed_addrs) = whitelist {
// TODO: incorporate delegated pubkeys
// if the event address is not in allowed_addrs.
if !allowed_addrs.contains(&event.pubkey) {
debug!(
"rejecting event: {}, unauthorized author",
event.get_event_id_prefix()
);
notice_tx
.try_send(Notice::blocked(
event.id,
"pubkey is not allowed to publish to this relay",
))
.ok();
continue;
}
}
// Check that event kind isn't blacklisted
let kinds_blacklist = &settings.limits.event_kind_blacklist.clone();
if let Some(event_kind_blacklist) = kinds_blacklist {
if event_kind_blacklist.contains(&event.kind) {
debug!(
"rejecting event: {}, blacklisted kind: {}",
&event.get_event_id_prefix(),
&event.kind
);
notice_tx
.try_send(Notice::blocked(
event.id,
"event kind is blocked by relay"
))
.ok();
continue;
}
}
// send any metadata events to the NIP-05 verifier
if nip05_active && event.is_kind_metadata() {
// we are sending this prior to even deciding if we
// persist it. this allows the nip05 module to
// inspect it, update if necessary, or persist a new
// event and broadcast it itself.
metadata_tx.send(event.clone()).ok();
}
// get a validation result for use in verification and gRPC
let validation = if nip05_active {
Some(repo.get_latest_user_verification(&event.pubkey).await)
} else {
info!("opened database {:?} for writing", full_path);
}
upgrade_db(&mut pool.get()?)?;
None
};
// Make a copy of the whitelist
let whitelist = &settings.authorization.pubkey_whitelist.clone();
// check for NIP-05 verification
if nip05_enabled && validation.is_some() {
match validation.as_ref().unwrap() {
Ok(uv) => {
if uv.is_valid(&settings.verified_users) {
info!(
"new event from verified author ({:?},{:?})",
uv.name.to_string(),
event.get_author_prefix()
);
// get rate limit settings
let rps_setting = settings.limits.messages_per_sec;
let mut most_recent_rate_limit = Instant::now();
let mut lim_opt = None;
let clock = governor::clock::QuantaClock::default();
if let Some(rps) = rps_setting {
if rps > 0 {
info!("Enabling rate limits for event creation ({}/sec)", rps);
let quota = core::num::NonZeroU32::new(rps * 60).unwrap();
lim_opt = Some(RateLimiter::direct(Quota::per_minute(quota)));
}
}
loop {
if shutdown.try_recv().is_ok() {
info!("shutting down database writer");
break;
}
// call blocking read on channel
let next_event = event_rx.blocking_recv();
// if the channel has closed, we will never get work
if next_event.is_none() {
break;
}
// track if an event write occurred; this is used to
// update the rate limiter
let mut event_write = false;
let subm_event = next_event.unwrap();
let event = subm_event.event;
let notice_tx = subm_event.notice_tx;
// check if this event is authorized.
if let Some(allowed_addrs) = whitelist {
// TODO: incorporate delegated pubkeys
// if the event address is not in allowed_addrs.
if !allowed_addrs.contains(&event.pubkey) {
info!(
"Rejecting event {}, unauthorized author",
event.get_event_id_prefix()
);
notice_tx
.try_send(Notice::blocked(
event.id,
"pubkey is not allowed to publish to this relay",
))
.ok();
continue;
}
}
// send any metadata events to the NIP-05 verifier
if nip05_active && event.is_kind_metadata() {
// we are sending this prior to even deciding if we
// persist it. this allows the nip05 module to
// inspect it, update if necessary, or persist a new
// event and broadcast it itself.
metadata_tx.send(event.clone()).ok();
}
// check for NIP-05 verification
if nip05_enabled {
match nip05::query_latest_user_verification(pool.get()?, event.pubkey.to_owned()) {
Ok(uv) => {
if uv.is_valid(&settings.verified_users) {
info!(
"new event from verified author ({:?},{:?})",
uv.name.to_string(),
event.get_author_prefix()
);
} else {
info!("rejecting event, author ({:?} / {:?}) verification invalid (expired/wrong domain)",
uv.name.to_string(),
event.get_author_prefix()
);
notice_tx
.try_send(Notice::blocked(
event.id,
"NIP-05 verification is no longer valid (expired/wrong domain)",
))
.ok();
continue;
}
}
Err(Error::SqlError(rusqlite::Error::QueryReturnedNoRows)) => {
debug!(
"no verification records found for pubkey: {:?}",
} else {
info!(
"rejecting event, author ({:?} / {:?}) verification invalid (expired/wrong domain)",
uv.name.to_string(),
event.get_author_prefix()
);
notice_tx
.try_send(Notice::blocked(
event.id,
"NIP-05 verification needed to publish events",
"NIP-05 verification is no longer valid (expired/wrong domain)",
))
.ok();
continue;
}
Err(e) => {
warn!("checking nip05 verification status failed: {:?}", e);
continue;
}
}
}
// TODO: cache recent list of authors to remove a DB call.
let start = Instant::now();
if event.kind >= 20000 && event.kind < 30000 {
bcast_tx.send(event.clone()).ok();
info!(
"published ephemeral event: {:?} from: {:?} in: {:?}",
event.get_event_id_prefix(),
event.get_author_prefix(),
start.elapsed()
);
event_write = true
} else {
match write_event(&mut pool.get()?, &event) {
Ok(updated) => {
if updated == 0 {
trace!("ignoring duplicate or deleted event");
notice_tx.try_send(Notice::duplicate(event.id)).ok();
} else {
info!(
"persisted event: {:?} from: {:?} in: {:?}",
event.get_event_id_prefix(),
event.get_author_prefix(),
start.elapsed()
);
event_write = true;
// send this out to all clients
bcast_tx.send(event.clone()).ok();
notice_tx.try_send(Notice::saved(event.id)).ok();
}
}
Err(err) => {
warn!("event insert failed: {:?}", err);
let msg = "relay experienced an error trying to publish the latest event";
notice_tx.try_send(Notice::error(event.id, msg)).ok();
}
Err(Error::SqlError(rusqlite::Error::QueryReturnedNoRows)) => {
debug!(
"no verification records found for pubkey: {:?}",
event.get_author_prefix()
);
notice_tx
.try_send(Notice::blocked(
event.id,
"NIP-05 verification needed to publish events",
))
.ok();
continue;
}
}
// use rate limit, if defined, and if an event was actually written.
if event_write {
if let Some(ref lim) = lim_opt {
if let Err(n) = lim.check() {
let wait_for = n.wait_time_from(clock.now());
// check if we have recently logged rate
// limits, but print out a message only once
// every 10 seconds.
if most_recent_rate_limit.elapsed().as_secs() > 10 {
warn!(
"rate limit reached for event creation (sleep for {:?}) (suppressing future messages for 10 seconds)",
wait_for
);
// reset last rate limit message
most_recent_rate_limit = Instant::now();
}
// block event writes, allowing them to queue up
thread::sleep(wait_for);
continue;
}
Err(e) => {
warn!("checking nip05 verification status failed: {:?}", e);
continue;
}
}
}
info!("database connection closed");
Ok(())
})
}
/// Persist an event to the database, returning rows added.
pub fn write_event(conn: &mut PooledConnection, e: &Event) -> Result<usize> {
// start transaction
let tx = conn.transaction()?;
// get relevant fields from event and convert to blobs.
let id_blob = hex::decode(&e.id).ok();
let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok();
let delegator_blob: Option<Vec<u8>> = e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
let event_str = serde_json::to_string(&e).ok();
// ignore if the event hash is a duplicate.
let mut ins_count = tx.execute(
"INSERT OR IGNORE INTO event (event_hash, created_at, kind, author, delegated_by, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, ?6, strftime('%s','now'), FALSE);",
params![id_blob, e.created_at, e.kind, pubkey_blob, delegator_blob, event_str]
)?;
if ins_count == 0 {
// if the event was a duplicate, no need to insert event or
// pubkey references. This will abort the txn.
return Ok(ins_count);
}
// remember primary key of the event most recently inserted.
let ev_id = tx.last_insert_rowid();
// add all tags to the tag table
for tag in e.tags.iter() {
// ensure we have 2 values.
if tag.len() >= 2 {
let tagname = &tag[0];
let tagval = &tag[1];
// only single-char tags are searchable
let tagchar_opt = single_char_tagname(tagname);
match &tagchar_opt {
Some(_) => {
// if tagvalue is lowercase hex;
if is_lower_hex(tagval) && (tagval.len() % 2 == 0) {
tx.execute(
"INSERT OR IGNORE INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3)",
params![ev_id, &tagname, hex::decode(tagval).ok()],
)?;
// nip05 address
let nip05_address : Option<crate::nip05::Nip05Name> = validation.and_then(|x| x.ok().map(|y| y.name));
// GRPC check
if let Some(ref mut c) = grpc_client {
trace!("checking if grpc permits");
let grpc_start = Instant::now();
let decision_res = c.admit_event(&event, &subm_event.source_ip, subm_event.origin, subm_event.user_agent, nip05_address).await;
match decision_res {
Ok(decision) => {
if !decision.permitted() {
// gRPC returned a decision to reject this event
info!("GRPC rejected event: {:?} (kind: {}) from: {:?} in: {:?} (IP: {:?})",
event.get_event_id_prefix(),
event.kind,
event.get_author_prefix(),
grpc_start.elapsed(),
subm_event.source_ip);
notice_tx.try_send(Notice::blocked(event.id, &decision.message().unwrap_or_else(|| "".to_string()))).ok();
continue;
}
},
Err(e) => {
warn!("GRPC server error: {:?}", e);
}
}
}
// TODO: cache recent list of authors to remove a DB call.
let start = Instant::now();
if event.is_ephemeral() {
bcast_tx.send(event.clone()).ok();
debug!(
"published ephemeral event: {:?} from: {:?} in: {:?}",
event.get_event_id_prefix(),
event.get_author_prefix(),
start.elapsed()
);
event_write = true;
} else {
match repo.write_event(&event).await {
Ok(updated) => {
if updated == 0 {
trace!("ignoring duplicate or deleted event");
notice_tx.try_send(Notice::duplicate(event.id)).ok();
} else {
tx.execute(
"INSERT OR IGNORE INTO tag (event_id, name, value) VALUES (?1, ?2, ?3)",
params![ev_id, &tagname, &tagval],
)?;
info!(
"persisted event: {:?} (kind: {}) from: {:?} in: {:?} (IP: {:?})",
event.get_event_id_prefix(),
event.kind,
event.get_author_prefix(),
start.elapsed(),
subm_event.source_ip,
);
event_write = true;
// send this out to all clients
bcast_tx.send(event.clone()).ok();
notice_tx.try_send(Notice::saved(event.id)).ok();
}
}
None => {}
Err(err) => {
warn!("event insert failed: {:?}", err);
let msg = "relay experienced an error trying to publish the latest event";
notice_tx.try_send(Notice::error(event.id, msg)).ok();
}
}
}
// use rate limit, if defined, and if an event was actually written.
if event_write {
if let Some(ref lim) = lim_opt {
if let Err(n) = lim.check() {
let wait_for = n.wait_time_from(clock.now());
// check if we have recently logged rate
// limits, but print out a message only once
// every 10 seconds.
if most_recent_rate_limit.elapsed().as_secs() > 10 {
warn!(
"rate limit reached for event creation (sleep for {:?}) (suppressing future messages for 10 seconds)",
wait_for
);
// reset last rate limit message
most_recent_rate_limit = Instant::now();
}
// block event writes, allowing them to queue up
thread::sleep(wait_for);
continue;
}
}
}
}
// if this event is replaceable update, hide every other replaceable
// event with the same kind from the same author that was issued
// earlier than this.
if e.kind == 0 || e.kind == 3 || (e.kind >= 10000 && e.kind < 20000) {
let update_count = tx.execute(
"UPDATE event SET hidden=TRUE WHERE id!=? AND kind=? AND author=? AND created_at <= ? and hidden!=TRUE",
params![ev_id, e.kind, hex::decode(&e.pubkey).ok(), e.created_at],
)?;
if update_count > 0 {
info!(
"hid {} older replaceable kind {} events for author: {:?}",
update_count,
e.kind,
e.get_author_prefix()
);
}
}
// if this event is a deletion, hide the referenced events from the same author.
if e.kind == 5 {
let event_candidates = e.tag_values_by_name("e");
// first parameter will be author
let mut params: Vec<Box<dyn ToSql>> = vec![Box::new(hex::decode(&e.pubkey)?)];
event_candidates
.iter()
.filter(|x| is_hex(x) && x.len() == 64)
.filter_map(|x| hex::decode(x).ok())
.for_each(|x| params.push(Box::new(x)));
let query = format!(
"UPDATE event SET hidden=TRUE WHERE kind!=5 AND author=? AND event_hash IN ({})",
repeat_vars(params.len() - 1)
);
let mut stmt = tx.prepare(&query)?;
let update_count = stmt.execute(rusqlite::params_from_iter(params))?;
info!(
"hid {} deleted events for author {:?}",
update_count,
e.get_author_prefix()
);
} else {
// check if a deletion has already been recorded for this event.
// Only relevant for non-deletion events
let del_count = tx.query_row(
"SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.author=? AND t.name='e' AND e.kind=5 AND t.value_hex=? LIMIT 1;",
params![pubkey_blob, id_blob], |row| row.get::<usize, usize>(0));
// check if the query returned a result, meaning we should
// hide the current event
if del_count.ok().is_some() {
// a deletion already existed, mark original event as hidden.
info!(
"hid event: {:?} due to existing deletion by author: {:?}",
e.get_event_id_prefix(),
e.get_author_prefix()
);
let _update_count =
tx.execute("UPDATE event SET hidden=TRUE WHERE id=?", params![ev_id])?;
// event was deleted, so let caller know nothing new
// arrived, preventing this from being sent to active
// subscriptions
ins_count = 0;
}
}
tx.commit()?;
Ok(ins_count)
info!("database connection closed");
Ok(())
}
/// Serialized event associated with a specific subscription request.
@@ -407,293 +336,3 @@ pub struct QueryResult {
/// Serialized event
pub event: String,
}
/// Produce an arbitrary list of '?' parameters.
fn repeat_vars(count: usize) -> String {
if count == 0 {
return "".to_owned();
}
let mut s = "?,".repeat(count);
// Remove trailing comma
s.pop();
s
}
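A quick illustration of its behavior (a sketch matching the implementation above):

#[test]
fn repeat_vars_examples() {
    assert_eq!(repeat_vars(0), "");
    assert_eq!(repeat_vars(1), "?");
    assert_eq!(repeat_vars(3), "?,?,?");
}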
/// Create a dynamic SQL subquery and params from a subscription filter.
fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>) {
// build a dynamic SQL query. all user-input is either an integer
// (sqli-safe), or a string that is filtered to only contain
// hexadecimal characters. Strings that require escaping (tag
// names/values) use parameters.
// if the filter is malformed, don't return anything.
if f.force_no_match {
let empty_query = "SELECT e.content, e.created_at FROM event e WHERE 1=0".to_owned();
// query parameters for SQLite
let empty_params: Vec<Box<dyn ToSql>> = vec![];
return (empty_query, empty_params);
}
let mut query = "SELECT e.content, e.created_at FROM event e".to_owned();
// query parameters for SQLite
let mut params: Vec<Box<dyn ToSql>> = vec![];
// individual filter components (single conditions such as an author or event ID)
let mut filter_components: Vec<String> = Vec::new();
// Query for "authors", allowing prefix matches
if let Some(authvec) = &f.authors {
// take each author and convert to a hexsearch
let mut auth_searches: Vec<String> = vec![];
for auth in authvec {
match hex_range(auth) {
Some(HexSearch::Exact(ex)) => {
auth_searches.push("author=? OR delegated_by=?".to_owned());
params.push(Box::new(ex.clone()));
params.push(Box::new(ex));
}
Some(HexSearch::Range(lower, upper)) => {
auth_searches.push(
"(author>? AND author<?) OR (delegated_by>? AND delegated_by<?)".to_owned(),
);
params.push(Box::new(lower.clone()));
params.push(Box::new(upper.clone()));
params.push(Box::new(lower));
params.push(Box::new(upper));
}
Some(HexSearch::LowerOnly(lower)) => {
auth_searches.push("author>? OR delegated_by>?".to_owned());
params.push(Box::new(lower.clone()));
params.push(Box::new(lower));
}
None => {
info!("Could not parse hex range from author {:?}", auth);
}
}
}
if !authvec.is_empty() {
let authors_clause = format!("({})", auth_searches.join(" OR "));
filter_components.push(authors_clause);
} else {
// if the authors list was empty, we should never return
// any results.
filter_components.push("false".to_owned());
}
}
// Query for Kind
if let Some(ks) = &f.kinds {
// kind is number, no escaping needed
let str_kinds: Vec<String> = ks.iter().map(|x| x.to_string()).collect();
let kind_clause = format!("kind IN ({})", str_kinds.join(", "));
filter_components.push(kind_clause);
}
// Query for event, allowing prefix matches
if let Some(idvec) = &f.ids {
// take each id and convert to a hexsearch
let mut id_searches: Vec<String> = vec![];
for id in idvec {
match hex_range(id) {
Some(HexSearch::Exact(ex)) => {
id_searches.push("event_hash=?".to_owned());
params.push(Box::new(ex));
}
Some(HexSearch::Range(lower, upper)) => {
id_searches.push("(event_hash>? AND event_hash<?)".to_owned());
params.push(Box::new(lower));
params.push(Box::new(upper));
}
Some(HexSearch::LowerOnly(lower)) => {
id_searches.push("event_hash>?".to_owned());
params.push(Box::new(lower));
}
None => {
info!("Could not parse hex range from id {:?}", id);
}
}
}
if !idvec.is_empty() {
let id_clause = format!("({})", id_searches.join(" OR "));
filter_components.push(id_clause);
} else {
// if the ids list was empty, we should never return
// any results.
filter_components.push("false".to_owned());
}
}
// Query for tags
if let Some(map) = &f.tags {
for (key, val) in map.iter() {
let mut str_vals: Vec<Box<dyn ToSql>> = vec![];
let mut blob_vals: Vec<Box<dyn ToSql>> = vec![];
for v in val {
if (v.len() % 2 == 0) && is_lower_hex(v) {
if let Ok(h) = hex::decode(v) {
blob_vals.push(Box::new(h));
}
} else {
str_vals.push(Box::new(v.to_owned()));
}
}
// create clauses with "?" params for each tag value being searched
let str_clause = format!("value IN ({})", repeat_vars(str_vals.len()));
let blob_clause = format!("value_hex IN ({})", repeat_vars(blob_vals.len()));
// find evidence of the target tag name/value existing for this event.
let tag_clause = format!("e.id IN (SELECT e.id FROM event e LEFT JOIN tag t on e.id=t.event_id WHERE hidden!=TRUE and (name=? AND ({} OR {})))", str_clause, blob_clause);
// add the tag name as the first parameter
params.push(Box::new(key.to_string()));
// add all tag values that are plain strings as params
params.append(&mut str_vals);
// add all tag values that are blobs as params
params.append(&mut blob_vals);
filter_components.push(tag_clause);
}
}
// Query for timestamp
if f.since.is_some() {
let created_clause = format!("created_at > {}", f.since.unwrap());
filter_components.push(created_clause);
}
// Query for timestamp
if f.until.is_some() {
let until_clause = format!("created_at < {}", f.until.unwrap());
filter_components.push(until_clause);
}
// never display hidden events
query.push_str(" WHERE hidden!=TRUE");
// build filter component conditions
if !filter_components.is_empty() {
query.push_str(" AND ");
query.push_str(&filter_components.join(" AND "));
}
// Apply per-filter limit to this subquery.
// The use of a LIMIT implies a DESC order, to capture only the most recent events.
if let Some(lim) = f.limit {
let _ = write!(query, " ORDER BY e.created_at DESC LIMIT {}", lim);
} else {
query.push_str(" ORDER BY e.created_at ASC")
}
(query, params)
}
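For instance, a single filter like {"kinds":[1],"limit":10} produces, roughly, the subquery:

SELECT e.content, e.created_at FROM event e WHERE hidden!=TRUE AND kind IN (1) ORDER BY e.created_at DESC LIMIT 10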
/// Create a dynamic SQL query string and params from a subscription.
fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
// build a dynamic SQL query for an entire subscription, based on
// SQL subqueries for filters.
let mut subqueries: Vec<String> = Vec::new();
// subquery params
let mut params: Vec<Box<dyn ToSql>> = vec![];
// for every filter in the subscription, generate a subquery
for f in sub.filters.iter() {
let (f_subquery, mut f_params) = query_from_filter(f);
subqueries.push(f_subquery);
params.append(&mut f_params);
}
// encapsulate subqueries into select statements
let subqueries_selects: Vec<String> = subqueries
.iter()
.map(|s| format!("SELECT distinct content, created_at FROM ({})", s))
.collect();
let query: String = subqueries_selects.join(" UNION ");
(query, params)
}
fn log_pool_stats(pool: &SqlitePool) {
let state: r2d2::State = pool.state();
let in_use_cxns = state.connections - state.idle_connections;
debug!(
"DB pool usage (in_use: {}, available: {})",
in_use_cxns, state.connections
);
}
/// Perform a database query using a subscription.
///
/// The [`Subscription`] is converted into a SQL query. Each result
/// is published on the `query_tx` channel as it is returned. If a
/// message becomes available on the `abandon_query_rx` channel, the
/// query is immediately aborted.
pub async fn db_query(
sub: Subscription,
client_id: String,
pool: SqlitePool,
query_tx: tokio::sync::mpsc::Sender<QueryResult>,
mut abandon_query_rx: tokio::sync::oneshot::Receiver<()>,
) {
task::spawn_blocking(move || {
let mut row_count: usize = 0;
let start = Instant::now();
// generate SQL query
let (q, p) = query_from_sub(&sub);
trace!("SQL generated in {:?}", start.elapsed());
// show pool stats
log_pool_stats(&pool);
// cutoff for displaying slow queries
let slow_cutoff = Duration::from_millis(1000);
let start = Instant::now();
if let Ok(conn) = pool.get() {
// execute the query. Don't cache, since queries vary so much.
let mut stmt = conn.prepare(&q)?;
let mut event_rows = stmt.query(rusqlite::params_from_iter(p))?;
let mut first_result = true;
while let Some(row) = event_rows.next()? {
if first_result {
let first_result_elapsed = start.elapsed();
// logging for slow queries; show sub and SQL
if first_result_elapsed >= slow_cutoff {
info!(
"going to query for: {:?} (cid: {}, sub: {:?})",
sub, client_id, sub.id
);
info!(
"final query string (slow): {} (cid: {}, sub: {:?})",
q, client_id, sub.id
);
} else {
trace!(
"going to query for: {:?} (cid: {}, sub: {:?})",
sub,
client_id,
sub.id
);
trace!("final query string: {}", q);
}
debug!(
"first result in {:?} (cid: {}, sub: {:?})",
first_result_elapsed, client_id, sub.id
);
first_result = false;
}
// check if this is still active
// TODO: check every N rows
if abandon_query_rx.try_recv().is_ok() {
debug!("query aborted (cid: {}, sub: {:?})", client_id, sub.id);
return Ok(());
}
row_count += 1;
let event_json = row.get(0)?;
query_tx
.blocking_send(QueryResult {
sub_id: sub.get_id(),
event: event_json,
})
.ok();
}
query_tx
.blocking_send(QueryResult {
sub_id: sub.get_id(),
event: "EOSE".to_string(),
})
.ok();
debug!(
"query completed in {:?} (cid: {}, sub: {:?}, rows: {})",
start.elapsed(),
client_id,
sub.id,
row_count
);
} else {
warn!("Could not get a database connection for querying");
}
let ok: Result<()> = Ok(());
ok
});
}


@@ -80,11 +80,11 @@ impl FromStr for Operator {
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct ConditionQuery {
pub(crate) conditions: Vec<Condition>,
pub conditions: Vec<Condition>,
}
impl ConditionQuery {
pub fn allows_event(&self, event: &Event) -> bool {
#[must_use] pub fn allows_event(&self, event: &Event) -> bool {
// check each condition, to ensure that the event complies
// with the restriction.
for c in &self.conditions {
@@ -101,14 +101,14 @@ impl ConditionQuery {
}
// Verify that the delegator approved the delegation; return a ConditionQuery if so.
pub fn validate_delegation(
#[must_use] pub fn validate_delegation(
delegator: &str,
delegatee: &str,
cond_query: &str,
sigstr: &str,
) -> Option<ConditionQuery> {
// form the token
let tok = format!("nostr:delegation:{}:{}", delegatee, cond_query);
let tok = format!("nostr:delegation:{delegatee}:{cond_query}");
// form SHA256 hash
let digest: sha256::Hash = sha256::Hash::hash(tok.as_bytes());
let sig = schnorr::Signature::from_str(sigstr).unwrap();
@@ -133,18 +133,18 @@ pub fn validate_delegation(
}
/// Parsed delegation condition
/// see https://github.com/nostr-protocol/nips/pull/28#pullrequestreview-1084903800
/// An example complex condition would be: kind=1,2,3&created_at<1665265999
/// see <https://github.com/nostr-protocol/nips/pull/28#pullrequestreview-1084903800>
/// An example complex condition would be: `kind=1,2,3&created_at<1665265999`
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct Condition {
pub(crate) field: Field,
pub(crate) operator: Operator,
pub(crate) values: Vec<u64>,
pub field: Field,
pub operator: Operator,
pub values: Vec<u64>,
}
impl Condition {
/// Check if this condition allows the given event to be delegated
pub fn allows_event(&self, event: &Event) -> bool {
#[must_use] pub fn allows_event(&self, event: &Event) -> bool {
// determine what the right-hand side of the operator is
let resolved_field = match &self.field {
Field::Kind => event.kind,
@@ -323,7 +323,7 @@ mod tests {
Condition {
field: Field::CreatedAt,
operator: Operator::LessThan,
values: vec![1665867123],
values: vec![1_665_867_123],
},
],
};
@@ -332,19 +332,6 @@ mod tests {
assert_eq!(parsed, cq);
Ok(())
}
fn simple_event() -> Event {
Event {
id: "0".to_owned(),
pubkey: "0".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: vec![],
content: "".to_owned(),
sig: "0".to_owned(),
tagidx: None,
}
}
// Check for condition logic on event w/ empty values
#[test]
fn condition_with_empty_values() {
@@ -353,7 +340,7 @@ mod tests {
operator: Operator::GreaterThan,
values: vec![],
};
let e = simple_event();
let e = Event::simple_event();
assert!(!c.allows_event(&e));
c.operator = Operator::LessThan;
assert!(!c.allows_event(&e));
@@ -373,7 +360,7 @@ mod tests {
operator: Operator::GreaterThan,
values: vec![10],
};
let mut e = simple_event();
let mut e = Event::simple_event();
// kind is not greater than 10, not allowed
e.kind = 1;
assert!(!c.allows_event(&e));
@@ -392,7 +379,7 @@ mod tests {
operator: Operator::Equals,
values: vec![0, 10, 20],
};
let mut e = simple_event();
let mut e = Event::simple_event();
// Allow if event kind is in list for Equals
e.kind = 10;
assert!(c.allows_event(&e));
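Tying these pieces together: on the wire, a delegation appears as a 4-element tag on the delegatee's event, whose last element is a schnorr signature over the sha256 of the token built above. A rough, illustrative shape (all values are placeholders):

["delegation",
 "<delegator pubkey, 64-char hex>",
 "kind=1&created_at<1665265999",
 "<hex schnorr signature over sha256('nostr:delegation:<delegatee pubkey>:kind=1&created_at<1665265999')>"]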


@@ -48,6 +48,10 @@ pub enum Error {
DatabaseDirError,
#[error("Database Connection Pool Error")]
DatabasePoolError(r2d2::Error),
#[error("SQL error")]
SqlxError(sqlx::Error),
#[error("Database Connection Pool Error")]
SqlxDatabasePoolError(sqlx::Error),
#[error("Custom Error : {0}")]
CustomError(String),
#[error("Task join error")]
@@ -58,6 +62,12 @@ pub enum Error {
HexError(hex::FromHexError),
#[error("Delegation parse error")]
DelegationParseError,
#[error("Channel closed error")]
ChannelClosed,
#[error("Authz error")]
AuthzError,
#[error("Tonic GRPC error")]
TonicError(tonic::Status),
#[error("Unknown/Undocumented")]
UnknownError,
}
@@ -100,6 +110,12 @@ impl From<rusqlite::Error> for Error {
}
}
impl From<sqlx::Error> for Error {
fn from(d: sqlx::Error) -> Self {
Error::SqlxDatabasePoolError(d)
}
}
impl From<serde_json::Error> for Error {
/// Wrap JSON error
fn from(r: serde_json::Error) -> Self {
@@ -120,3 +136,10 @@ impl From<config::ConfigError> for Error {
Error::ConfigError(r)
}
}
impl From<tonic::Status> for Error {
/// Wrap Tonic error
fn from(r: tonic::Status) -> Self {
Error::TonicError(r)
}
}
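These From impls are what allow callers to bubble errors up with the ? operator; a minimal sketch (the function is hypothetical):

fn into_relay_error(res: std::result::Result<(), tonic::Status>) -> std::result::Result<(), Error> {
    // `?` converts the tonic::Status into Error::TonicError via the From impl above
    res?;
    Ok(())
}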


@@ -1,6 +1,6 @@
//! Event parsing and validation
use crate::delegation::validate_delegation;
use crate::error::Error::*;
use crate::error::Error::{CommandUnknownError, EventCouldNotCanonicalize, EventInvalidId, EventInvalidSignature, EventMalformedPubkey};
use crate::error::Result;
use crate::nip05;
use crate::utils::unix_time;
@@ -28,7 +28,7 @@ pub struct EventCmd {
}
impl EventCmd {
pub fn event_id(&self) -> &str {
#[must_use] pub fn event_id(&self) -> &str {
&self.event.id
}
}
@@ -37,19 +37,19 @@ impl EventCmd {
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct Event {
pub id: String,
pub(crate) pubkey: String,
pub pubkey: String,
#[serde(skip)]
pub(crate) delegated_by: Option<String>,
pub(crate) created_at: u64,
pub(crate) kind: u64,
pub delegated_by: Option<String>,
pub created_at: u64,
pub kind: u64,
#[serde(deserialize_with = "tag_from_string")]
// NOTE: array-of-arrays may need to be more general than a string container
pub(crate) tags: Vec<Vec<String>>,
pub(crate) content: String,
pub(crate) sig: String,
pub tags: Vec<Vec<String>>,
pub content: String,
pub sig: String,
// Optimization for tag search, built on demand.
#[serde(skip)]
pub(crate) tagidx: Option<HashMap<char, HashSet<String>>>,
pub tagidx: Option<HashMap<char, HashSet<String>>>,
}
/// Simple tag type for array of array of strings.
@@ -65,7 +65,7 @@ where
}
/// Attempt to form a single-char tag name.
pub fn single_char_tagname(tagname: &str) -> Option<char> {
#[must_use] pub fn single_char_tagname(tagname: &str) -> Option<char> {
// We return the tag character if and only if the tagname consists
// of a single char.
let mut tagnamechars = tagname.chars();
@@ -87,26 +87,78 @@ pub fn single_char_tagname(tagname: &str) -> Option<char> {
impl From<EventCmd> for Result<Event> {
fn from(ec: EventCmd) -> Result<Event> {
// ensure command is correct
if ec.cmd != "EVENT" {
Err(CommandUnknownError)
} else {
if ec.cmd == "EVENT" {
ec.event.validate().map(|_| {
let mut e = ec.event;
e.build_index();
e.update_delegation();
e
})
} else {
Err(CommandUnknownError)
}
}
}
impl Event {
pub fn is_kind_metadata(&self) -> bool {
#[cfg(test)]
#[must_use] pub fn simple_event() -> Event {
Event {
id: "0".to_owned(),
pubkey: "0".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: vec![],
content: "".to_owned(),
sig: "0".to_owned(),
tagidx: None,
}
}
#[must_use] pub fn is_kind_metadata(&self) -> bool {
self.kind == 0
}
/// Should this event be persisted?
#[must_use] pub fn is_ephemeral(&self) -> bool {
self.kind >= 20000 && self.kind < 30000
}
/// Should this event be replaced with newer timestamps from same author?
#[must_use] pub fn is_replaceable(&self) -> bool {
self.kind == 0 || self.kind == 3 || self.kind == 41 || (self.kind >= 10000 && self.kind < 20000)
}
/// Should this event be replaced with newer timestamps from same author, for distinct `d` tag values?
#[must_use] pub fn is_param_replaceable(&self) -> bool {
self.kind >= 30000 && self.kind < 40000
}
/// What is the replaceable `d` tag value?
#[must_use] pub fn distinct_param(&self) -> Option<String> {
if self.is_param_replaceable() {
let default = "".to_string();
let dvals:Vec<&String> = self.tags
.iter()
.filter(|x| !x.is_empty())
.filter(|x| x.get(0).unwrap() == "d")
.map(|x| x.get(1).unwrap_or(&default)).take(1)
.collect();
let dval_first = dvals.get(0);
match dval_first {
Some(_) => {dval_first.map(|x| x.to_string())},
None => Some(default)
}
} else {
None
}
}
/// Pull a NIP-05 Name out of the event, if one exists
pub fn get_nip05_addr(&self) -> Option<nip05::Nip05Name> {
#[must_use] pub fn get_nip05_addr(&self) -> Option<nip05::Nip05Name> {
if self.is_kind_metadata() {
// very quick check if we should attempt to parse this json
if self.content.contains("\"nip05\"") {
@@ -123,7 +175,7 @@ impl Event {
// is this event delegated (properly)?
// does the signature match, and are conditions valid?
// if so, return an alternate author for the event
pub fn delegated_author(&self) -> Option<String> {
#[must_use] pub fn delegated_author(&self) -> Option<String> {
// is there a delegation tag?
let delegation_tag: Vec<String> = self
.tags
@@ -131,8 +183,7 @@ impl Event {
.filter(|x| x.len() == 4)
.filter(|x| x.get(0).unwrap() == "delegation")
.take(1)
.next()?
.to_vec(); // get first tag
.next()?.clone(); // get first tag
//let delegation_tag = self.tag_values_by_name("delegation");
// delegation tags should have exactly 3 elements after the name (pubkey, condition, sig)
@@ -161,11 +212,11 @@ impl Event {
}
/// Update delegation status
fn update_delegation(&mut self) {
pub fn update_delegation(&mut self) {
self.delegated_by = self.delegated_author();
}
/// Build an event tag index
fn build_index(&mut self) {
pub fn build_index(&mut self) {
// if there are no tags; just leave the index as None
if self.tags.is_empty() {
return;
@@ -192,24 +243,24 @@ impl Event {
}
/// Create a short event identifier, suitable for logging.
pub fn get_event_id_prefix(&self) -> String {
#[must_use] pub fn get_event_id_prefix(&self) -> String {
self.id.chars().take(8).collect()
}
pub fn get_author_prefix(&self) -> String {
#[must_use] pub fn get_author_prefix(&self) -> String {
self.pubkey.chars().take(8).collect()
}
/// Retrieve tag initial values across all tags matching the name
pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
#[must_use] pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
self.tags
.iter()
.filter(|x| x.len() > 1)
.filter(|x| x.get(0).unwrap() == tag_name)
.map(|x| x.get(1).unwrap().to_owned())
.map(|x| x.get(1).unwrap().clone())
.collect()
}
pub fn is_valid_timestamp(&self, reject_future_seconds: Option<usize>) -> bool {
#[must_use] pub fn is_valid_timestamp(&self, reject_future_seconds: Option<usize>) -> bool {
if let Some(allowable_future) = reject_future_seconds {
let curr_time = unix_time();
// calculate difference, plus how far future we allow
@@ -226,7 +277,7 @@ impl Event {
}
/// Check if this event has a valid signature.
fn validate(&self) -> Result<()> {
pub fn validate(&self) -> Result<()> {
// TODO: return a Result with a reason for invalid events
// validation is performed by:
// * parsing JSON string into event fields
@@ -241,7 +292,7 @@ impl Event {
let c = c_opt.unwrap();
// * compute the sha256sum.
let digest: sha256::Hash = sha256::Hash::hash(c.as_bytes());
let hex_digest = format!("{:x}", digest);
let hex_digest = format!("{digest:x}");
// * ensure the id matches the computed sha256sum.
if self.id != hex_digest {
debug!("event id does not match digest");
@@ -271,7 +322,7 @@ impl Event {
let id = Number::from(0_u64);
c.push(serde_json::Value::Number(id));
// public key
c.push(Value::String(self.pubkey.to_owned()));
c.push(Value::String(self.pubkey.clone()));
// creation time
let created_at = Number::from(self.created_at);
c.push(serde_json::Value::Number(created_at));
@@ -281,7 +332,7 @@ impl Event {
// tags
c.push(self.tags_to_canonical());
// content
c.push(Value::String(self.content.to_owned()));
c.push(Value::String(self.content.clone()));
serde_json::to_string(&Value::Array(c)).ok()
}
@@ -289,11 +340,11 @@ impl Event {
fn tags_to_canonical(&self) -> Value {
let mut tags = Vec::<Value>::new();
// iterate over self tags,
for t in self.tags.iter() {
for t in &self.tags {
// each tag is a vec of strings
let mut a = Vec::<Value>::new();
for v in t.iter() {
a.push(serde_json::Value::String(v.to_owned()));
a.push(serde_json::Value::String(v.clone()));
}
tags.push(serde_json::Value::Array(a));
}
@@ -301,7 +352,7 @@ impl Event {
}
/// Determine if the given tag and value set intersect with tags in this event.
pub fn generic_tag_val_intersect(&self, tagname: char, check: &HashSet<String>) -> bool {
#[must_use] pub fn generic_tag_val_intersect(&self, tagname: char, check: &HashSet<String>) -> bool {
match &self.tagidx {
// check if this is indexable tagname
Some(idx) => match idx.get(&tagname) {
@@ -319,31 +370,18 @@ impl Event {
#[cfg(test)]
mod tests {
use super::*;
fn simple_event() -> Event {
Event {
id: "0".to_owned(),
pubkey: "0".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: vec![],
content: "".to_owned(),
sig: "0".to_owned(),
tagidx: None,
}
}
#[test]
fn event_creation() {
// create an event
let event = simple_event();
let event = Event::simple_event();
assert_eq!(event.id, "0");
}
#[test]
fn event_serialize() -> Result<()> {
// serialize an event to JSON string
let event = simple_event();
let event = Event::simple_event();
let j = serde_json::to_string(&event)?;
assert_eq!(j, "{\"id\":\"0\",\"pubkey\":\"0\",\"created_at\":0,\"kind\":0,\"tags\":[],\"content\":\"\",\"sig\":\"0\"}");
Ok(())
@@ -351,14 +389,14 @@ mod tests {
#[test]
fn empty_event_tag_match() {
let event = simple_event();
let event = Event::simple_event();
assert!(!event
.generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
.generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
}
#[test]
fn single_event_tag_match() {
let mut event = simple_event();
let mut event = Event::simple_event();
event.tags = vec![vec!["e".to_owned(), "foo".to_owned()]];
event.build_index();
assert_eq!(
@@ -373,7 +411,7 @@ mod tests {
#[test]
fn event_tags_serialize() -> Result<()> {
// serialize an event with tags to JSON string
let mut event = simple_event();
let mut event = Event::simple_event();
event.tags = vec![
vec![
"e".to_owned(),
@@ -406,7 +444,7 @@ mod tests {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501234,
created_at: 501_234,
kind: 1,
tags: vec![],
content: "this is a test".to_owned(),
@@ -424,7 +462,7 @@ mod tests {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501234,
created_at: 501_234,
kind: 1,
tags: vec![
vec!["j".to_owned(), "abc".to_owned()],
@@ -451,7 +489,7 @@ mod tests {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501234,
created_at: 501_234,
kind: 1,
tags: vec![
vec!["j".to_owned(), "abc".to_owned()],
@@ -478,7 +516,7 @@ mod tests {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501234,
created_at: 501_234,
kind: 1,
tags: vec![
vec!["#e".to_owned(), "aoeu".to_owned()],
@@ -497,4 +535,123 @@ mod tests {
let expected = Some(expected_json.to_owned());
assert_eq!(c, expected);
}
#[test]
fn ephemeral_event() {
let mut event = Event::simple_event();
event.kind = 20000;
assert!(event.is_ephemeral());
event.kind = 29999;
assert!(event.is_ephemeral());
event.kind = 30000;
assert!(!event.is_ephemeral());
event.kind = 19999;
assert!(!event.is_ephemeral());
}
#[test]
fn replaceable_event() {
let mut event = Event::simple_event();
event.kind = 0;
assert!(event.is_replaceable());
event.kind = 3;
assert!(event.is_replaceable());
event.kind = 10000;
assert!(event.is_replaceable());
event.kind = 19999;
assert!(event.is_replaceable());
event.kind = 20000;
assert!(!event.is_replaceable());
}
#[test]
fn param_replaceable_event() {
let mut event = Event::simple_event();
event.kind = 30000;
assert!(event.is_param_replaceable());
event.kind = 39999;
assert!(event.is_param_replaceable());
event.kind = 29999;
assert!(!event.is_param_replaceable());
event.kind = 40000;
assert!(!event.is_param_replaceable());
}
#[test]
fn param_replaceable_value_case_1() {
// NIP case #1: "tags":[["d",""]]
let mut event = Event::simple_event();
event.kind = 30000;
event.tags = vec![
vec!["d".to_owned(), "".to_owned()]];
assert_eq!(event.distinct_param(), Some("".to_string()));
}
#[test]
fn param_replaceable_value_case_2() {
// NIP case #2: "tags":[]: implicit d tag with empty value
let mut event = Event::simple_event();
event.kind = 30000;
assert_eq!(event.distinct_param(), Some("".to_string()));
}
#[test]
fn param_replaceable_value_case_3() {
// NIP case #3: "tags":[["d"]]: implicit empty value ""
let mut event = Event::simple_event();
event.kind = 30000;
event.tags = vec![
vec!["d".to_owned()]];
assert_eq!(event.distinct_param(), Some("".to_string()));
}
#[test]
fn param_replaceable_value_case_4() {
// NIP case #4: "tags":[["d",""],["d","not empty"]]: only first d tag is considered
let mut event = Event::simple_event();
event.kind = 30000;
event.tags = vec![
vec!["d".to_owned(), "".to_string()],
vec!["d".to_owned(), "not empty".to_string()]
];
assert_eq!(event.distinct_param(), Some("".to_string()));
}
#[test]
fn param_replaceable_value_case_4b() {
// Variation of NIP case #4 with the d tags in reverse order:
// "tags":[["d","not empty"],["d",""]]: only the first d tag is considered
let mut event = Event::simple_event();
event.kind = 30000;
event.tags = vec![
vec!["d".to_owned(), "not empty".to_string()],
vec!["d".to_owned(), "".to_string()]
];
assert_eq!(event.distinct_param(), Some("not empty".to_string()));
}
#[test]
fn param_replaceable_value_case_5() {
// NIP case #5: "tags":[["d"],["d","some value"]]: only first d tag is considered
let mut event = Event::simple_event();
event.kind = 30000;
event.tags = vec![
vec!["d".to_owned()],
vec!["d".to_owned(), "second value".to_string()],
vec!["d".to_owned(), "third value".to_string()]
];
assert_eq!(event.distinct_param(), Some("".to_string()));
}
#[test]
fn param_replaceable_value_case_6() {
// NIP case #6: "tags":[["e"]]: same as no tags
let mut event = Event::simple_event();
event.kind = 30000;
event.tags = vec![
vec!["e".to_owned()],
];
assert_eq!(event.distinct_param(), Some("".to_string()));
}
}


@@ -1,5 +1,5 @@
//! Utilities for searching hexadecimal
use crate::utils::is_hex;
use crate::utils::{is_hex};
use hex;
/// Types of hexadecimal queries.
@@ -19,16 +19,15 @@ fn is_all_fs(s: &str) -> bool {
}
/// Find the next hex sequence greater than the argument.
pub fn hex_range(s: &str) -> Option<HexSearch> {
// handle special cases
if !is_hex(s) || s.len() > 64 {
#[must_use] pub fn hex_range(s: &str) -> Option<HexSearch> {
let mut hash_base = s.to_owned();
if !is_hex(&hash_base) || hash_base.len() > 64 {
return None;
}
if s.len() == 64 {
return Some(HexSearch::Exact(hex::decode(s).ok()?));
if hash_base.len() == 64 {
return Some(HexSearch::Exact(hex::decode(&hash_base).ok()?));
}
// if s is odd, add a zero
let mut hash_base = s.to_owned();
let mut odd = hash_base.len() % 2 != 0;
if odd {
// extend the string to make it even
@@ -57,8 +56,9 @@ pub fn hex_range(s: &str) -> Option<HexSearch> {
} else if odd {
// check if first char in this byte is NOT 'f'
if b < 240 {
upper[byte_len] = b + 16; // bump up the first character in this byte
// increment done, stop iterating through the vec
// bump up the first character in this byte
upper[byte_len] = b + 16;
// increment done, stop iterating through the vec
break;
}
// if it is 'f', reset the byte to 0 and do a carry
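The resulting behavior, sketched from the code above (illustrative, not exhaustive):

// hex_range("<64 hex chars>")      -> Some(HexSearch::Exact(bytes))
// hex_range("abcd") (even prefix)  -> Some(HexSearch::Range(abcd, abce)), scan [lower, upper)
// hex_range("abc")  (odd prefix)   -> padded to "abc0"; upper bumps the high nibble: Range(abc0, abd0)
// hex_range("ff")   (all 'f's)     -> Some(HexSearch::LowerOnly(ff)), no finite upper bound
// hex_range("zz")                  -> None (not hexadecimal)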


@@ -35,9 +35,9 @@ impl From<config::Info> for RelayInfo {
description: i.description,
pubkey: i.pubkey,
contact: i.contact,
supported_nips: Some(vec![1, 2, 9, 11, 12, 15, 16, 20, 22, 26]),
supported_nips: Some(vec![1, 2, 9, 11, 12, 15, 16, 20, 22, 33]),
software: Some("https://git.sr.ht/~gheartsfield/nostr-rs-relay".to_owned()),
version: CARGO_PKG_VERSION.map(|x| x.to_owned()),
version: CARGO_PKG_VERSION.map(std::borrow::ToOwned::to_owned),
}
}
}


@@ -1,3 +1,4 @@
pub mod cli;
pub mod close;
pub mod config;
pub mod conn;
@@ -8,8 +9,9 @@ pub mod event;
pub mod hexrange;
pub mod info;
pub mod nip05;
pub mod nauthz;
pub mod notice;
pub mod schema;
pub mod repo;
pub mod subscription;
pub mod utils;
// Public API for creating relays programmatically


@@ -1,50 +1,51 @@
//! Server process
use clap::Parser;
use nostr_rs_relay::cli::CLIArgs;
use nostr_rs_relay::config;
use nostr_rs_relay::server::start_server;
use std::env;
use std::sync::mpsc as syncmpsc;
use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
use std::thread;
use tracing::info;
use console_subscriber::ConsoleLayer;
/// Return a requested DB name from command line arguments.
fn db_from_args(args: &[String]) -> Option<String> {
if args.len() == 3 && args.get(1) == Some(&"--db".to_owned()) {
return args.get(2).map(std::clone::Clone::clone);
}
None
}
/// Start running a Nostr relay server.
fn main() {
// setup tracing
let _trace_sub = tracing_subscriber::fmt::try_init();
info!("Starting up from main");
// get database directory from args
let args: Vec<String> = env::args().collect();
let db_dir: Option<String> = db_from_args(&args);
// configure settings from config.toml
// replace default settings with those read from config.toml
let mut settings = config::Settings::new();
let args = CLIArgs::parse();
// get config file name from args
let config_file_arg = args.config;
// configure settings from the config file (defaults to config.toml)
// replace default settings with those read from the config file
let mut settings = config::Settings::new(&config_file_arg);
// setup tracing
if settings.diagnostics.tracing {
// enable tracing with tokio-console
ConsoleLayer::builder().with_default_env().init();
} else {
// standard logging
tracing_subscriber::fmt::try_init().unwrap();
}
// update with database location
if let Some(db) = db_dir {
settings.database.data_directory = db;
}
info!("Starting up from main");
// get database directory from args
let db_dir_arg = args.db;
// update with database location from args, if provided
if let Some(db_dir) = db_dir_arg {
settings.database.data_directory = db_dir;
}
// we should have a 'control plane' channel to monitor and bump
// the server. this will let us do stuff like clear the database,
// shutdown, etc.; for now all this does is initiate shutdown if
// `()` is sent. This will change in the future, this is just a
// stopgap to shutdown the relay when it is used as a library.
let (_, ctrl_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
// run this in a new thread
let handle = thread::spawn(|| {
// we should have a 'control plane' channel to monitor and bump the server.
// this will let us do stuff like clear the database, shutdown, etc.
let _svr = start_server(settings, ctrl_rx);
let handle = thread::spawn(move || {
let _svr = start_server(&settings, ctrl_rx);
});
// block on nostr thread to finish.
handle.join().unwrap();
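For library users, the control-plane channel above is the shutdown hook: keep the sender and send () to stop the relay. A minimal sketch (settings construction elided; assumes the same start_server signature as above):
let (ctrl_tx, ctrl_rx) = std::sync::mpsc::channel::<()>();
let handle = std::thread::spawn(move || {
    nostr_rs_relay::server::start_server(&settings, ctrl_rx)
});
// ...later, initiate shutdown and wait for the relay thread to exit
ctrl_tx.send(()).ok();
handle.join().ok();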

110
src/nauthz.rs Normal file
View File

@@ -0,0 +1,110 @@
use crate::error::{Error, Result};
use crate::{event::Event, nip05::Nip05Name};
use nauthz_grpc::authorization_client::AuthorizationClient;
use nauthz_grpc::event::TagEntry;
use nauthz_grpc::{Decision, Event as GrpcEvent, EventReply, EventRequest};
use tracing::{info, warn};
pub mod nauthz_grpc {
tonic::include_proto!("nauthz");
}
// A decision for the DB to act upon
pub trait AuthzDecision: Send + Sync {
fn permitted(&self) -> bool;
fn message(&self) -> Option<String>;
}
impl AuthzDecision for EventReply {
fn permitted(&self) -> bool {
self.decision == Decision::Permit as i32
}
fn message(&self) -> Option<String> {
self.message.clone()
}
}
// A connection to an event admission GRPC server
pub struct EventAuthzService {
server_addr: String,
conn: Option<AuthorizationClient<tonic::transport::Channel>>,
}
// conversion of Nip05Names into GRPC type
impl std::convert::From<Nip05Name> for nauthz_grpc::event_request::Nip05Name {
fn from(value: Nip05Name) -> Self {
nauthz_grpc::event_request::Nip05Name {
local: value.local.clone(),
domain: value.domain.clone(),
}
}
}
// conversion of event tags into gRPC struct
fn tags_to_protobuf(tags: &Vec<Vec<String>>) -> Vec<TagEntry> {
tags.iter()
.map(|x| TagEntry { values: x.clone() })
.collect()
}
impl EventAuthzService {
pub async fn connect(server_addr: &str) -> EventAuthzService {
let mut eas = EventAuthzService {
server_addr: server_addr.to_string(),
conn: None,
};
eas.ready_connection().await;
eas
}
pub async fn ready_connection(self: &mut Self) {
if self.conn.is_none() {
let client = AuthorizationClient::connect(self.server_addr.to_string()).await;
if let Err(ref msg) = client {
warn!("could not connect to nostr authz GRPC server: {:?}", msg);
} else {
info!("connected to nostr authorization GRPC server");
}
self.conn = client.ok();
}
}
pub async fn admit_event(
self: &mut Self,
event: &Event,
ip: &str,
origin: Option<String>,
user_agent: Option<String>,
nip05: Option<Nip05Name>,
) -> Result<Box<dyn AuthzDecision>> {
self.ready_connection().await;
let id_blob = hex::decode(&event.id)?;
let pubkey_blob = hex::decode(&event.pubkey)?;
let sig_blob = hex::decode(&event.sig)?;
if let Some(ref mut c) = self.conn {
let gevent = GrpcEvent {
id: id_blob,
pubkey: pubkey_blob,
sig: sig_blob,
created_at: event.created_at,
kind: event.kind,
content: event.content.clone(),
tags: tags_to_protobuf(&event.tags),
};
let svr_res = c
.event_admit(EventRequest {
event: Some(gevent),
ip_addr: Some(ip.to_string()),
origin,
user_agent,
auth_pubkey: None,
nip05: nip05.map(|x| nauthz_grpc::event_request::Nip05Name::from(x)),
})
.await?;
let reply = svr_res.into_inner();
return Ok(Box::new(reply));
} else {
return Err(Error::AuthzError);
}
}
}
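Because admit_event returns a boxed AuthzDecision, callers and tests can substitute a fixed decision without a gRPC round-trip. A minimal sketch against the trait above:
struct AllowAll;
impl AuthzDecision for AllowAll {
    fn permitted(&self) -> bool { true }
    fn message(&self) -> Option<String> { None }
}
// a denying implementation would return false, plus an operator-facing
// message that can be surfaced back to the client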

View File

@@ -5,16 +5,14 @@
//! consumes a stream of metadata events, and keeps a database table
//! updated with the current NIP-05 verification status.
use crate::config::VerifiedUsers;
use crate::db;
use crate::error::{Error, Result};
use crate::event::Event;
use crate::utils::unix_time;
use crate::repo::NostrRepo;
use std::sync::Arc;
use hyper::body::HttpBody;
use hyper::client::connect::HttpConnector;
use hyper::Client;
use hyper_tls::HttpsConnector;
use rand::Rng;
use rusqlite::params;
use std::time::Duration;
use std::time::Instant;
use std::time::SystemTime;
@@ -23,14 +21,12 @@ use tracing::{debug, info, warn};
/// NIP-05 verifier state
pub struct Verifier {
/// Repository for saving/retrieving events and records
repo: Arc<dyn NostrRepo>,
/// Metadata events for us to inspect
metadata_rx: tokio::sync::broadcast::Receiver<Event>,
/// Newly validated events get written and then broadcast on this channel to subscribers
event_tx: tokio::sync::broadcast::Sender<Event>,
/// SQLite read query pool
read_pool: db::SqlitePool,
/// SQLite write query pool
write_pool: db::SqlitePool,
/// Settings
settings: crate::config::Settings,
/// HTTP client
@@ -46,13 +42,13 @@ pub struct Verifier {
/// A NIP-05 identifier is a local part and domain.
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct Nip05Name {
local: String,
domain: String,
pub local: String,
pub domain: String,
}
impl Nip05Name {
/// Does this name represent the entire domain?
pub fn is_domain_only(&self) -> bool {
#[must_use] pub fn is_domain_only(&self) -> bool {
self.local == "_"
}
@@ -62,8 +58,8 @@ impl Nip05Name {
"https://{}/.well-known/nostr.json?name={}",
self.domain, self.local
)
.parse::<http::Uri>()
.ok()
.parse::<http::Uri>()
.ok()
}
}
@@ -73,16 +69,11 @@ impl std::convert::TryFrom<&str> for Nip05Name {
fn try_from(inet: &str) -> Result<Self, Self::Error> {
// break full name at the @ boundary.
let components: Vec<&str> = inet.split('@').collect();
if components.len() != 2 {
Err(Error::CustomError("too many/few components".to_owned()))
} else {
if components.len() == 2 {
// check if local name is valid
let local = components[0];
let domain = components[1];
if local
.chars()
.all(|x| x.is_alphanumeric() || x == '_' || x == '-' || x == '.')
{
if local.chars().all(|x| x.is_alphanumeric() || x == '_' || x == '-' || x == '.') {
if domain
.chars()
.all(|x| x.is_alphanumeric() || x == '-' || x == '.')
@@ -101,6 +92,8 @@ impl std::convert::TryFrom<&str> for Nip05Name {
"invalid character in local part".to_owned(),
))
}
} else {
Err(Error::CustomError("too many/few components".to_owned()))
}
}
}
@@ -111,55 +104,30 @@ impl std::fmt::Display for Nip05Name {
}
}
// Current time, with a slight forward jitter in seconds
fn now_jitter(sec: u64) -> u64 {
// random time between now and sec seconds in the future (10 minutes at the call sites).
let mut rng = rand::thread_rng();
let jitter_amount = rng.gen_range(0..sec);
let now = unix_time();
now.saturating_add(jitter_amount)
}
/// Check if the specified username and address are present and match in this response body
fn body_contains_user(username: &str, address: &str, bytes: hyper::body::Bytes) -> Result<bool> {
fn body_contains_user(username: &str, address: &str, bytes: &hyper::body::Bytes) -> Result<bool> {
// convert the body into json
let body: serde_json::Value = serde_json::from_slice(&bytes)?;
let body: serde_json::Value = serde_json::from_slice(bytes)?;
// ensure we have a names object.
let names_map = body
.as_object()
.and_then(|x| x.get("names"))
.and_then(|x| x.as_object())
.and_then(serde_json::Value::as_object)
.ok_or_else(|| Error::CustomError("not a map".to_owned()))?;
// get the pubkey for the requested user
let check_name = names_map.get(username).and_then(|x| x.as_str());
let check_name = names_map.get(username).and_then(serde_json::Value::as_str);
// ensure the address is a match
Ok(check_name.map(|x| x == address).unwrap_or(false))
Ok(check_name.map_or(false, |x| x == address))
}
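As a concrete example of the document shape body_contains_user expects, a .well-known/nostr.json mapping a name to a pubkey verifies like this (the key is a made-up placeholder):
let pk = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef";
let doc = format!(r#"{{"names":{{"bob":"{}"}}}}"#, pk);
let bytes = hyper::body::Bytes::from(doc);
assert!(body_contains_user("bob", pk, &bytes).unwrap());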
impl Verifier {
pub fn new(
repo: Arc<dyn NostrRepo>,
metadata_rx: tokio::sync::broadcast::Receiver<Event>,
event_tx: tokio::sync::broadcast::Sender<Event>,
settings: crate::config::Settings,
) -> Result<Self> {
info!("creating NIP-05 verifier");
// build a database connection for reading and writing.
let write_pool = db::build_pool(
"nip05 writer",
&settings,
rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE,
1, // min conns
4, // max conns
true, // wait for DB
);
let read_pool = db::build_pool(
"nip05 reader",
&settings,
rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY,
1, // min conns
8, // max conns
true, // wait for DB
);
// setup hyper client
let https = HttpsConnector::new();
let client = Client::builder().build::<_, hyper::Body>(https);
@@ -175,10 +143,9 @@ impl Verifier {
// duration.
let reverify_interval = tokio::time::interval(http_wait_duration);
Ok(Verifier {
repo,
metadata_rx,
event_tx,
read_pool,
write_pool,
settings,
client,
wait_after_finish,
@@ -246,44 +213,40 @@ impl Verifier {
let response_fut = self.client.request(req);
// HTTP request with timeout
match tokio::time::timeout(Duration::from_secs(5), response_fut).await {
Ok(response_res) => {
// limit size of verification document to 1MB.
const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024 * 1024;
let response = response_res?;
// determine content length from response
let response_content_length = match response.body().size_hint().upper() {
Some(v) => v,
None => MAX_ALLOWED_RESPONSE_SIZE + 1, // reject missing content length
};
// TODO: test how hyper handles the client providing an inaccurate content-length.
if response_content_length <= MAX_ALLOWED_RESPONSE_SIZE {
let (parts, body) = response.into_parts();
// TODO: consider redirects
if parts.status == http::StatusCode::OK {
// parse body, determine if the username / key / address is present
let body_bytes = hyper::body::to_bytes(body).await?;
let body_matches = body_contains_user(&nip.local, pubkey, body_bytes)?;
if body_matches {
return Ok(UserWebVerificationStatus::Verified);
}
// successful response, parsed as a nip-05
// document, but this name/pubkey was not
// present.
return Ok(UserWebVerificationStatus::Unverified);
if let Ok(response_res) = tokio::time::timeout(Duration::from_secs(5), response_fut).await {
// limit size of verification document to 1MB.
const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024 * 1024;
let response = response_res?;
// determine content length from response
let response_content_length = match response.body().size_hint().upper() {
Some(v) => v,
None => MAX_ALLOWED_RESPONSE_SIZE + 1, // reject missing content length
};
// TODO: test how hyper handles the client providing an inaccurate content-length.
if response_content_length <= MAX_ALLOWED_RESPONSE_SIZE {
let (parts, body) = response.into_parts();
// TODO: consider redirects
if parts.status == http::StatusCode::OK {
// parse body, determine if the username / key / address is present
let body_bytes = hyper::body::to_bytes(body).await?;
let body_matches = body_contains_user(&nip.local, pubkey, &body_bytes)?;
if body_matches {
return Ok(UserWebVerificationStatus::Verified);
}
} else {
info!(
"content length missing or exceeded limits for account: {:?}",
nip.to_string()
);
// successful response, parsed as a nip-05
// document, but this name/pubkey was not
// present.
return Ok(UserWebVerificationStatus::Unverified);
}
} else {
info!(
"content length missing or exceeded limits for account: {:?}",
nip.to_string()
);
}
Err(_) => {
info!("timeout verifying account {:?}", nip);
return Ok(UserWebVerificationStatus::Unknown);
}
} else {
info!("timeout verifying account {:?}", nip);
return Ok(UserWebVerificationStatus::Unknown);
}
Ok(UserWebVerificationStatus::Unknown)
}
@@ -294,8 +257,15 @@ impl Verifier {
// run a loop, restarting on failure
loop {
let res = self.run_internal().await;
if let Err(e) = res {
match res {
Err(Error::ChannelClosed) => {
// channel was closed, we are shutting down
return;
},
Err(e) => {
info!("error in verifier: {:?}", e);
},
_ => {}
}
}
}
@@ -309,7 +279,7 @@ impl Verifier {
if let Some(naddr) = e.get_nip05_addr() {
info!("got metadata event for ({:?},{:?})", naddr.to_string() ,e.get_author_prefix());
// Process a new author, checking if they are verified:
let check_verified = get_latest_user_verification(self.read_pool.get().expect("could not get connection"), &e.pubkey).await;
let check_verified = self.repo.get_latest_user_verification(&e.pubkey).await;
// ensure the event we got is more recent than the one we have, otherwise we can ignore it.
if let Ok(last_check) = check_verified {
if e.created_at <= last_check.event_created {
@@ -342,6 +312,7 @@ impl Verifier {
}
Err(tokio::sync::broadcast::error::RecvError::Closed) => {
info!("metadata broadcast channel closed");
return Err(Error::ChannelClosed);
}
}
},
@@ -370,7 +341,7 @@ impl Verifier {
.duration_since(SystemTime::UNIX_EPOCH)
.map(|x| x.as_secs())
.unwrap_or(0);
let vr = get_oldest_user_verification(self.read_pool.get()?, earliest_epoch).await;
let vr = self.repo.get_oldest_user_verification(earliest_epoch).await;
match vr {
Ok(ref v) => {
let new_status = self.get_web_verification(&v.name, &v.address).await;
@@ -378,34 +349,37 @@ impl Verifier {
UserWebVerificationStatus::Verified => {
// freshly verified account, update the
// timestamp.
self.update_verification_record(self.write_pool.get()?, v)
self.repo.update_verification_timestamp(v.rowid)
.await?;
info!("verification updated for {}", v.to_string());
}
UserWebVerificationStatus::DomainNotAllowed
| UserWebVerificationStatus::Unknown => {
// server may be offline, or temporarily
// blocked by the config file. Note the
// failure so we can process something
// else.
| UserWebVerificationStatus::Unknown => {
// server may be offline, or temporarily
// blocked by the config file. Note the
// failure so we can process something
// else.
// have we had enough failures to give up?
if v.failure_count >= max_failures as u64 {
info!(
"giving up on verifying {:?} after {} failures",
v.name, v.failure_count
);
self.delete_verification_record(self.write_pool.get()?, v)
.await?;
} else {
// record normal failure, incrementing failure count
self.fail_verification_record(self.write_pool.get()?, v)
.await?;
}
}
// have we had enough failures to give up?
if v.failure_count >= max_failures as u64 {
info!(
"giving up on verifying {:?} after {} failures",
v.name, v.failure_count
);
self.repo.delete_verification(v.rowid)
.await?;
} else {
// record normal failure, incrementing failure count
info!("verification failed for {}", v.to_string());
self.repo.fail_verification(v.rowid).await?;
}
}
UserWebVerificationStatus::Unverified => {
// domain has removed the verification, drop
// the record on our side.
self.delete_verification_record(self.write_pool.get()?, v)
info!("verification rescinded for {}", v.to_string());
self.repo.delete_verification(v.rowid)
.await?;
}
}
@@ -426,80 +400,6 @@ impl Verifier {
Ok(())
}
/// Reset the verification timestamp on a VerificationRecord
pub async fn update_verification_record(
&mut self,
mut conn: db::PooledConnection,
vr: &VerificationRecord,
) -> Result<()> {
let vr_id = vr.rowid;
let vr_str = vr.to_string();
tokio::task::spawn_blocking(move || {
// add some jitter to the verification to prevent everything from stacking up together.
let verif_time = now_jitter(600);
let tx = conn.transaction()?;
{
// update verification time and reset any failure count
let query =
"UPDATE user_verification SET verified_at=?, failure_count=0 WHERE id=?";
let mut stmt = tx.prepare(query)?;
stmt.execute(params![verif_time, vr_id])?;
}
tx.commit()?;
info!("verification updated for {}", vr_str);
let ok: Result<()> = Ok(());
ok
})
.await?
}
/// Reset the failure timestamp on a VerificationRecord
pub async fn fail_verification_record(
&mut self,
mut conn: db::PooledConnection,
vr: &VerificationRecord,
) -> Result<()> {
let vr_id = vr.rowid;
let vr_str = vr.to_string();
let fail_count = vr.failure_count.saturating_add(1);
tokio::task::spawn_blocking(move || {
// add some jitter to the verification to prevent everything from stacking up together.
let fail_time = now_jitter(600);
let tx = conn.transaction()?;
{
let query = "UPDATE user_verification SET failed_at=?, failure_count=? WHERE id=?";
let mut stmt = tx.prepare(query)?;
stmt.execute(params![fail_time, fail_count, vr_id])?;
}
tx.commit()?;
info!("verification failed for {}", vr_str);
let ok: Result<()> = Ok(());
ok
})
.await?
}
/// Delete a VerificationRecord that is no longer valid
pub async fn delete_verification_record(
&mut self,
mut conn: db::PooledConnection,
vr: &VerificationRecord,
) -> Result<()> {
let vr_id = vr.rowid;
let vr_str = vr.to_string();
tokio::task::spawn_blocking(move || {
let tx = conn.transaction()?;
{
let query = "DELETE FROM user_verification WHERE id=?;";
let mut stmt = tx.prepare(query)?;
stmt.execute(params![vr_id])?;
}
tx.commit()?;
info!("verification rescinded for {}", vr_str);
let ok: Result<()> = Ok(());
ok
})
.await?
}
/// Persist an event, create a verification record, and broadcast.
// TODO: have more event-writing logic handled in the db module.
// Right now, these events avoid the rate limit. That is
@@ -513,11 +413,11 @@ impl Verifier {
// disabled/passive, the event has already been persisted.
let should_write_event = self.settings.verified_users.is_enabled();
if should_write_event {
match db::write_event(&mut self.write_pool.get()?, event) {
match self.repo.write_event(event).await {
Ok(updated) => {
if updated != 0 {
info!(
"persisted event: {:?} in {:?}",
"persisted event (new verified pubkey): {:?} in {:?}",
event.get_event_id_prefix(),
start.elapsed()
);
@@ -533,7 +433,7 @@ impl Verifier {
}
}
// write the verification record
save_verification_record(self.write_pool.get()?, event, name).await?;
self.repo.create_verification_record(&event.id, name).await?;
Ok(())
}
}
@@ -563,7 +463,7 @@ pub struct VerificationRecord {
/// Check with settings to determine if a given domain is allowed to
/// publish.
pub fn is_domain_allowed(
#[must_use] pub fn is_domain_allowed(
domain: &str,
whitelist: &Option<Vec<String>>,
blacklist: &Option<Vec<String>>,
@@ -583,7 +483,7 @@ pub fn is_domain_allowed(
impl VerificationRecord {
/// Check if the record is recent enough to be considered valid,
/// and the domain is allowed.
pub fn is_valid(&self, verified_users_settings: &VerifiedUsers) -> bool {
#[must_use] pub fn is_valid(&self, verified_users_settings: &VerifiedUsers) -> bool {
//let settings = SETTINGS.read().unwrap();
// how long a verification record is good for
let nip05_expiration = &verified_users_settings.verify_expiration_duration;
@@ -630,130 +530,6 @@ impl std::fmt::Display for VerificationRecord {
}
}
/// Create a new verification record based on an event
pub async fn save_verification_record(
mut conn: db::PooledConnection,
event: &Event,
name: &str,
) -> Result<()> {
let e = hex::decode(&event.id).ok();
let n = name.to_owned();
let a_prefix = event.get_author_prefix();
tokio::task::spawn_blocking(move || {
let tx = conn.transaction()?;
{
// if we create a /new/ one, we should get rid of any old ones. or group the new ones by name and only consider the latest.
let query = "INSERT INTO user_verification (metadata_event, name, verified_at) VALUES ((SELECT id from event WHERE event_hash=?), ?, strftime('%s','now'));";
let mut stmt = tx.prepare(query)?;
stmt.execute(params![e, n])?;
// get the row ID
let v_id = tx.last_insert_rowid();
// delete everything else by this name
let del_query = "DELETE FROM user_verification WHERE name = ? AND id != ?;";
let mut del_stmt = tx.prepare(del_query)?;
let count = del_stmt.execute(params![n,v_id])?;
if count > 0 {
info!("removed {} old verification records for ({:?},{:?})", count, n, a_prefix);
}
}
tx.commit()?;
info!("saved new verification record for ({:?},{:?})", n, a_prefix);
let ok: Result<()> = Ok(());
ok
}).await?
}
/// Retrieve the most recent verification record for a given pubkey (async).
pub async fn get_latest_user_verification(
conn: db::PooledConnection,
pubkey: &str,
) -> Result<VerificationRecord> {
let p = pubkey.to_owned();
tokio::task::spawn_blocking(move || query_latest_user_verification(conn, p)).await?
}
/// Query database for the latest verification record for a given pubkey.
pub fn query_latest_user_verification(
mut conn: db::PooledConnection,
pubkey: String,
) -> Result<VerificationRecord> {
let tx = conn.transaction()?;
let query = "SELECT v.id, v.name, e.event_hash, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v LEFT JOIN event e ON e.id=v.metadata_event WHERE e.author=? ORDER BY e.created_at DESC, v.verified_at DESC, v.failed_at DESC LIMIT 1;";
let mut stmt = tx.prepare_cached(query)?;
let fields = stmt.query_row(params![hex::decode(&pubkey).ok()], |r| {
let rowid: u64 = r.get(0)?;
let rowname: String = r.get(1)?;
let eventid: Vec<u8> = r.get(2)?;
let created_at: u64 = r.get(3)?;
// create a tuple since we can't throw non-rusqlite errors in this closure
Ok((
rowid,
rowname,
eventid,
created_at,
r.get(4).ok(),
r.get(5).ok(),
r.get(6)?,
))
})?;
Ok(VerificationRecord {
rowid: fields.0,
name: Nip05Name::try_from(&fields.1[..])?,
address: pubkey,
event: hex::encode(fields.2),
event_created: fields.3,
last_success: fields.4,
last_failure: fields.5,
failure_count: fields.6,
})
}
/// Retrieve the oldest user verification (async)
pub async fn get_oldest_user_verification(
conn: db::PooledConnection,
earliest: u64,
) -> Result<VerificationRecord> {
tokio::task::spawn_blocking(move || query_oldest_user_verification(conn, earliest)).await?
}
pub fn query_oldest_user_verification(
mut conn: db::PooledConnection,
earliest: u64,
) -> Result<VerificationRecord> {
let tx = conn.transaction()?;
let query = "SELECT v.id, v.name, e.event_hash, e.author, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v LEFT JOIN event e ON e.id=v.metadata_event WHERE (v.verified_at < ? OR v.verified_at IS NULL) AND (v.failed_at < ? OR v.failed_at IS NULL) ORDER BY v.verified_at ASC, v.failed_at ASC LIMIT 1;";
let mut stmt = tx.prepare_cached(query)?;
let fields = stmt.query_row(params![earliest, earliest], |r| {
let rowid: u64 = r.get(0)?;
let rowname: String = r.get(1)?;
let eventid: Vec<u8> = r.get(2)?;
let pubkey: Vec<u8> = r.get(3)?;
let created_at: u64 = r.get(4)?;
// create a tuple since we can't throw non-rusqlite errors in this closure
Ok((
rowid,
rowname,
eventid,
pubkey,
created_at,
r.get(5).ok(),
r.get(6).ok(),
r.get(7)?,
))
})?;
let vr = VerificationRecord {
rowid: fields.0,
name: Nip05Name::try_from(&fields.1[..])?,
address: hex::encode(fields.3),
event: hex::encode(fields.2),
event_created: fields.4,
last_success: fields.5,
last_failure: fields.6,
failure_count: fields.7,
};
Ok(vr)
}
#[cfg(test)]
mod tests {
use super::*;
@@ -762,7 +538,7 @@ mod tests {
fn local_from_inet() {
let addr = "bob@example.com";
let parsed = Nip05Name::try_from(addr);
assert!(!parsed.is_err());
assert!(parsed.is_ok());
let v = parsed.unwrap();
assert_eq!(v.local, "bob");
assert_eq!(v.domain, "example.com");

View File

@@ -19,18 +19,14 @@ pub enum Notice {
}
impl EventResultStatus {
pub fn to_bool(&self) -> bool {
#[must_use] pub fn to_bool(&self) -> bool {
match self {
Self::Saved => true,
Self::Duplicate => true,
Self::Invalid => false,
Self::Blocked => false,
Self::RateLimited => false,
Self::Error => false,
Self::Duplicate | Self::Saved => true,
Self::Invalid | Self::Blocked | Self::RateLimited | Self::Error => false,
}
}
pub fn prefix(&self) -> &'static str {
#[must_use] pub fn prefix(&self) -> &'static str {
match self {
Self::Saved => "saved",
Self::Duplicate => "duplicate",
@@ -47,7 +43,7 @@ impl Notice {
// Notice::err_msg(format!("{}", err), id)
//}
pub fn message(msg: String) -> Notice {
#[must_use] pub fn message(msg: String) -> Notice {
Notice::Message(msg)
}
@@ -56,27 +52,27 @@ impl Notice {
Notice::EventResult(EventResult { id, msg, status })
}
pub fn invalid(id: String, msg: &str) -> Notice {
#[must_use] pub fn invalid(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::Invalid)
}
pub fn blocked(id: String, msg: &str) -> Notice {
#[must_use] pub fn blocked(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::Blocked)
}
pub fn rate_limited(id: String, msg: &str) -> Notice {
#[must_use] pub fn rate_limited(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::RateLimited)
}
pub fn duplicate(id: String) -> Notice {
#[must_use] pub fn duplicate(id: String) -> Notice {
Notice::prefixed(id, "", EventResultStatus::Duplicate)
}
pub fn error(id: String, msg: &str) -> Notice {
#[must_use] pub fn error(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::Error)
}
pub fn saved(id: String) -> Notice {
#[must_use] pub fn saved(id: String) -> Notice {
Notice::EventResult(EventResult {
id,
msg: "".into(),

69
src/repo/mod.rs Normal file
View File

@@ -0,0 +1,69 @@
use crate::db::QueryResult;
use crate::error::Result;
use crate::event::Event;
use crate::nip05::VerificationRecord;
use crate::subscription::Subscription;
use crate::utils::unix_time;
use async_trait::async_trait;
use rand::Rng;
pub mod sqlite;
pub mod sqlite_migration;
pub mod postgres;
pub mod postgres_migration;
#[async_trait]
pub trait NostrRepo: Send + Sync {
/// Start the repository (any initialization or maintenance tasks can be kicked off here)
async fn start(&self) -> Result<()>;
/// Run migrations and return current version
async fn migrate_up(&self) -> Result<usize>;
/// Persist event to database
async fn write_event(&self, e: &Event) -> Result<u64>;
/// Perform a database query using a subscription.
///
/// The [`Subscription`] is converted into a SQL query. Each result
/// is published on the `query_tx` channel as it is returned. If a
/// message becomes available on the `abandon_query_rx` channel, the
/// query is immediately aborted.
async fn query_subscription(
&self,
sub: Subscription,
client_id: String,
query_tx: tokio::sync::mpsc::Sender<QueryResult>,
mut abandon_query_rx: tokio::sync::oneshot::Receiver<()>,
) -> Result<()>;
/// Perform normal maintenance
async fn optimize_db(&self) -> Result<()>;
/// Create a new verification record connected to a specific event
async fn create_verification_record(&self, event_id: &str, name: &str) -> Result<()>;
/// Update verification timestamp
async fn update_verification_timestamp(&self, id: u64) -> Result<()>;
/// Update verification record as failed
async fn fail_verification(&self, id: u64) -> Result<()>;
/// Delete verification record
async fn delete_verification(&self, id: u64) -> Result<()>;
/// Get the latest verification record for a given pubkey.
async fn get_latest_user_verification(&self, pub_key: &str) -> Result<VerificationRecord>;
/// Get oldest verification before timestamp
async fn get_oldest_user_verification(&self, before: u64) -> Result<VerificationRecord>;
}
// Current time, with a slight forward jitter in seconds
pub(crate) fn now_jitter(sec: u64) -> u64 {
// random time between now and sec seconds in the future (10 minutes at the call sites).
let mut rng = rand::thread_rng();
let jitter_amount = rng.gen_range(0..sec);
let now = unix_time();
now.saturating_add(jitter_amount)
}
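One way to drive the trait (a sketch, assuming a repo: Arc<dyn NostrRepo> and a parsed Subscription are already in hand): results stream over the mpsc channel with a final "EOSE" marker, and sending () on the oneshot cancels an in-flight query.
let (query_tx, mut query_rx) = tokio::sync::mpsc::channel::<QueryResult>(256);
let (abandon_tx, abandon_rx) = tokio::sync::oneshot::channel::<()>();
let r = repo.clone();
tokio::spawn(async move {
    r.query_subscription(sub, "cid-0".to_string(), query_tx, abandon_rx)
        .await
        .ok();
});
while let Some(res) = query_rx.recv().await {
    println!("{}: {}", res.sub_id, res.event); // serialized event JSON, or "EOSE"
}
// abandon_tx.send(()).ok(); // uncomment to cancel the query early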

741
src/repo/postgres.rs Normal file
View File

@@ -0,0 +1,741 @@
use crate::db::QueryResult;
use crate::error::Result;
use crate::event::{single_char_tagname, Event};
use crate::nip05::{Nip05Name, VerificationRecord};
use crate::repo::{now_jitter, NostrRepo};
use crate::subscription::{ReqFilter, Subscription};
use async_std::stream::StreamExt;
use async_trait::async_trait;
use chrono::{DateTime, TimeZone, Utc};
use sqlx::postgres::PgRow;
use sqlx::{Error, Execute, FromRow, Postgres, QueryBuilder, Row};
use std::time::{Duration, Instant};
use sqlx::Error::RowNotFound;
use crate::hexrange::{hex_range, HexSearch};
use crate::repo::postgres_migration::run_migrations;
use crate::server::NostrMetrics;
use crate::utils::{is_hex, is_lower_hex};
use tokio::sync::mpsc::Sender;
use tokio::sync::oneshot::Receiver;
use tracing::log::trace;
use tracing::{debug, error, info};
use crate::error;
pub type PostgresPool = sqlx::pool::Pool<Postgres>;
pub struct PostgresRepo {
conn: PostgresPool,
metrics: NostrMetrics,
}
impl PostgresRepo {
pub fn new(c: PostgresPool, m: NostrMetrics) -> PostgresRepo {
PostgresRepo {
conn: c,
metrics: m,
}
}
}
#[async_trait]
impl NostrRepo for PostgresRepo {
async fn start(&self) -> Result<()> {
info!("not implemented");
Ok(())
}
async fn migrate_up(&self) -> Result<usize> {
Ok(run_migrations(&self.conn).await?)
}
async fn write_event(&self, e: &Event) -> Result<u64> {
// start transaction
let mut tx = self.conn.begin().await?;
let start = Instant::now();
// get relevant fields from event and convert to blobs.
let id_blob = hex::decode(&e.id).ok();
let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok();
let delegator_blob: Option<Vec<u8>> =
e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
let event_str = serde_json::to_string(&e).unwrap();
// determine if this event would be shadowed by an existing
// replaceable event or parameterized replaceable event.
if e.is_replaceable() {
let repl_count = sqlx::query(
"SELECT e.id FROM event e WHERE e.pub_key=? AND e.kind=? AND e.created_at >= ? LIMIT 1;")
.bind(&pubkey_blob)
.bind(e.kind as i64)
.bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
.fetch_optional(&mut tx)
.await?;
if repl_count.is_some() {
return Ok(0);
}
}
if let Some(d_tag) = e.distinct_param() {
let repl_count:i64 = if is_lower_hex(&d_tag) && (d_tag.len() % 2 == 0) {
sqlx::query_scalar(
"SELECT count(*) AS count FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.pub_key=$1 AND e.kind=$2 AND t.name='d' AND t.value_hex=$3 AND e.created_at >= $4 LIMIT 1;")
.bind(hex::decode(&e.pubkey).ok())
.bind(e.kind as i64)
.bind(hex::decode(d_tag).ok())
.bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
.fetch_one(&mut tx)
.await?
} else {
sqlx::query_scalar(
"SELECT count(*) AS count FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.pub_key=$1 AND e.kind=$2 AND t.name='d' AND t.value=$3 AND e.created_at >= $4 LIMIT 1;")
.bind(hex::decode(&e.pubkey).ok())
.bind(e.kind as i64)
.bind(d_tag.as_bytes())
.bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
.fetch_one(&mut tx)
.await?
};
// if any rows were returned, then some newer event with
// the same author/kind/tag value exists, and we can ignore
// this event.
if repl_count > 0 {
return Ok(0)
}
}
// ignore if the event hash is a duplicate.
let mut ins_count = sqlx::query(
r#"INSERT INTO "event"
(id, pub_key, created_at, kind, "content", delegated_by)
VALUES($1, $2, $3, $4, $5, $6)
ON CONFLICT (id) DO NOTHING"#,
)
.bind(&id_blob)
.bind(&pubkey_blob)
.bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
.bind(e.kind as i64)
.bind(event_str.into_bytes())
.bind(delegator_blob)
.execute(&mut tx)
.await?
.rows_affected();
if ins_count == 0 {
// if the event was a duplicate, no need to insert event or
// pubkey references. This will abort the txn.
return Ok(0);
}
// add all tags to the tag table
for tag in e.tags.iter() {
// ensure we have 2 values.
if tag.len() >= 2 {
let tag_name = &tag[0];
let tag_val = &tag[1];
// only single-char tags are searchable
let tag_char_opt = single_char_tagname(tag_name);
let query = "INSERT INTO tag (event_id, \"name\", value) VALUES($1, $2, $3) \
ON CONFLICT (event_id, \"name\", value) DO NOTHING";
match &tag_char_opt {
Some(_) => {
// if tag value is lowercase hex;
if is_lower_hex(tag_val) && (tag_val.len() % 2 == 0) {
sqlx::query(query)
.bind(&id_blob)
.bind(tag_name)
.bind(hex::decode(tag_val).ok())
.execute(&mut tx)
.await?;
} else {
sqlx::query(query)
.bind(&id_blob)
.bind(tag_name)
.bind(tag_val.as_bytes())
.execute(&mut tx)
.await?;
}
}
None => {}
}
}
}
if e.is_replaceable() {
let update_count = sqlx::query("DELETE FROM \"event\" WHERE kind=$1 and pub_key = $2 and id not in (select id from \"event\" where kind=$1 and pub_key=$2 order by created_at desc limit 1);")
.bind(e.kind as i64)
.bind(hex::decode(&e.pubkey).ok())
.execute(&mut tx)
.await?.rows_affected();
if update_count > 0 {
info!(
"hid {} older replaceable kind {} events for author: {:?}",
update_count,
e.kind,
e.get_author_prefix()
);
}
}
// parameterized replaceable events
// check for parameterized replaceable events that would be hidden; don't insert these either.
if let Some(d_tag) = e.distinct_param() {
let update_count = if is_lower_hex(&d_tag) && (d_tag.len() % 2 == 0) {
sqlx::query("DELETE FROM event WHERE kind=$1 AND pub_key=$2 AND id IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=$1 AND e.pub_key=$2 AND t.name='d' AND t.value_hex=$3 ORDER BY created_at DESC OFFSET 1);")
.bind(e.kind as i64)
.bind(hex::decode(&e.pubkey).ok())
.bind(hex::decode(d_tag).ok())
.execute(&mut tx)
.await?.rows_affected()
} else {
sqlx::query("DELETE FROM event WHERE kind=$1 AND pub_key=$2 AND id IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=$1 AND e.pub_key=$2 AND t.name='d' AND t.value=$3 ORDER BY created_at DESC OFFSET 1);")
.bind(e.kind as i64)
.bind(hex::decode(&e.pubkey).ok())
.bind(d_tag.as_bytes())
.execute(&mut tx)
.await?.rows_affected()
};
if update_count > 0 {
info!(
"removed {} older parameterized replaceable kind {} events for author: {:?}",
update_count,
e.kind,
e.get_author_prefix()
);
}
}
// if this event is a deletion, hide the referenced events from the same author.
if e.kind == 5 {
let event_candidates = e.tag_values_by_name("e");
let pub_keys: Vec<Vec<u8>> = event_candidates
.iter()
.filter(|x| is_hex(x) && x.len() == 64)
.filter_map(|x| hex::decode(x).ok())
.collect();
let mut builder = QueryBuilder::new(
"UPDATE \"event\" SET hidden = 1::bit(1) WHERE kind != 5 AND pub_key = ",
);
builder.push_bind(hex::decode(&e.pubkey).ok());
builder.push(" AND id IN (");
let mut sep = builder.separated(", ");
for pk in pub_keys {
sep.push_bind(pk);
}
sep.push_unseparated(")");
let update_count = builder.build().execute(&mut tx).await?.rows_affected();
info!(
"hid {} deleted events for author {:?}",
update_count,
e.get_author_prefix()
);
} else {
// check if a deletion has already been recorded for this event.
// Only relevant for non-deletion events
let del_count = sqlx::query(
"SELECT e.id FROM \"event\" e \
LEFT JOIN tag t ON e.id = t.event_id \
WHERE e.pub_key = $1 AND t.\"name\" = 'e' AND e.kind = 5 AND t.value = $2 LIMIT 1",
)
.bind(&pubkey_blob)
.bind(&id_blob)
.fetch_optional(&mut tx)
.await?;
// check if the query returned a result, meaning we should
// hide the current event
if del_count.is_some() {
// a deletion already existed, mark original event as hidden.
info!(
"hid event: {:?} due to existing deletion by author: {:?}",
e.get_event_id_prefix(),
e.get_author_prefix()
);
sqlx::query("UPDATE \"event\" SET hidden = 1::bit(1) WHERE id = $1")
.bind(&id_blob)
.execute(&mut tx)
.await?;
// event was deleted, so let caller know nothing new
// arrived, preventing this from being sent to active
// subscriptions
ins_count = 0;
}
}
tx.commit().await?;
self.metrics
.write_events
.observe(start.elapsed().as_secs_f64());
Ok(ins_count)
}
async fn query_subscription(
&self,
sub: Subscription,
client_id: String,
query_tx: Sender<QueryResult>,
mut abandon_query_rx: Receiver<()>,
) -> Result<()> {
let start = Instant::now();
let mut row_count: usize = 0;
let metrics = &self.metrics;
for filter in sub.filters.iter() {
let start = Instant::now();
// generate SQL query
let q_filter = query_from_filter(filter);
if q_filter.is_none() {
debug!("Failed to generate query!");
continue;
}
debug!("SQL generated in {:?}", start.elapsed());
// cutoff for displaying slow queries
let slow_cutoff = Duration::from_millis(2000);
// any client that doesn't cause us to generate new rows in 5
// seconds gets dropped.
let abort_cutoff = Duration::from_secs(5);
let start = Instant::now();
let mut slow_first_event;
let mut last_successful_send = Instant::now();
// execute the query. Don't cache, since queries vary so much.
let mut q_filter = q_filter.unwrap();
let q_build = q_filter.build();
let sql = q_build.sql();
let mut results = q_build.fetch(&self.conn);
let mut first_result = true;
while let Some(row) = results.next().await {
if let Err(e) = row {
error!("Query failed: {} {} {:?}", e, sql, filter);
break;
}
let first_event_elapsed = start.elapsed();
slow_first_event = first_event_elapsed >= slow_cutoff;
if first_result {
debug!(
"first result in {:?} (cid: {}, sub: {:?})",
first_event_elapsed, client_id, sub.id
);
first_result = false;
}
// logging for slow queries; show sub and SQL.
// to reduce logging, only show ~1/256 of clients (ids with a leading "00")
if slow_first_event && client_id.starts_with("00") {
debug!(
"query req (slow): {:?} (cid: {}, sub: {:?})",
&sub, client_id, sub.id
);
} else {
trace!(
"query req: {:?} (cid: {}, sub: {:?})",
&sub,
client_id,
sub.id
);
}
// check if this is still active; every 100 rows
if row_count % 100 == 0 && abandon_query_rx.try_recv().is_ok() {
debug!("query cancelled by client (cid: {}, sub: {:?})", client_id, sub.id);
return Ok(());
}
row_count += 1;
let event_json: Vec<u8> = row.unwrap().get(0);
loop {
if query_tx.capacity() != 0 {
// we have capacity to add another item
break;
} else {
// the queue is full
trace!("db reader thread is stalled");
if last_successful_send + abort_cutoff < Instant::now() {
// the queue has been full for too long, abort
info!("aborting database query due to slow client");
metrics.query_aborts.with_label_values(&["slowclient"]).inc();
return Ok(());
}
// give the queue a chance to clear before trying again
async_std::task::sleep(Duration::from_millis(100)).await;
}
}
// TODO: we could use try_send, but we'd have to juggle
// getting the query result back as part of the error
// result.
query_tx
.send(QueryResult {
sub_id: sub.get_id(),
event: String::from_utf8(event_json).unwrap(),
})
.await
.ok();
last_successful_send = Instant::now();
}
}
query_tx
.send(QueryResult {
sub_id: sub.get_id(),
event: "EOSE".to_string(),
})
.await
.ok();
self.metrics
.query_sub
.observe(start.elapsed().as_secs_f64());
debug!(
"query completed in {:?} (cid: {}, sub: {:?}, db_time: {:?}, rows: {})",
start.elapsed(),
client_id,
sub.id,
start.elapsed(),
row_count
);
Ok(())
}
async fn optimize_db(&self) -> Result<()> {
// Not implemented
Ok(())
}
async fn create_verification_record(&self, event_id: &str, name: &str) -> Result<()> {
let mut tx = self.conn.begin().await?;
sqlx::query("DELETE FROM user_verification WHERE \"name\" = $1")
.bind(name)
.execute(&mut tx)
.await?;
sqlx::query("INSERT INTO user_verification (event_id, \"name\", verified_at) VALUES ($1, $2, now())")
.bind(hex::decode(event_id).ok())
.bind(name)
.execute(&mut tx)
.await?;
tx.commit().await?;
info!("saved new verification record for ({:?})", name);
Ok(())
}
async fn update_verification_timestamp(&self, id: u64) -> Result<()> {
// add some jitter to the verification to prevent everything from stacking up together.
let verify_time = now_jitter(600);
// update verification time and reset any failure count
sqlx::query(
"UPDATE user_verification SET verified_at = $1, fail_count = 0 WHERE id = $2",
)
.bind(Utc.timestamp_opt(verify_time as i64, 0).unwrap())
.bind(id as i64)
.execute(&self.conn)
.await?;
info!("verification updated for {}", id);
Ok(())
}
async fn fail_verification(&self, id: u64) -> Result<()> {
sqlx::query("UPDATE user_verification SET failed_at = now(), fail_count = fail_count + 1 WHERE id = $1")
.bind(id as i64)
.execute(&self.conn)
.await?;
Ok(())
}
async fn delete_verification(&self, id: u64) -> Result<()> {
sqlx::query("DELETE FROM user_verification WHERE id = $1")
.bind(id as i64)
.execute(&self.conn)
.await?;
Ok(())
}
async fn get_latest_user_verification(&self, pub_key: &str) -> Result<VerificationRecord> {
let query = r#"SELECT
v.id,
v."name",
e.id as event_id,
e.pub_key,
e.created_at,
v.verified_at,
v.failed_at,
v.fail_count
FROM user_verification v
LEFT JOIN "event" e ON e.id = v.event_id
WHERE e.pub_key = $1
ORDER BY e.created_at DESC, v.verified_at DESC, v.failed_at DESC
LIMIT 1"#;
sqlx::query_as::<_, VerificationRecord>(query)
.bind(hex::decode(pub_key).ok())
.fetch_optional(&self.conn)
.await?
.ok_or(error::Error::SqlxError(RowNotFound))
}
async fn get_oldest_user_verification(&self, before: u64) -> Result<VerificationRecord> {
let query = r#"SELECT
v.id,
v."name",
e.id as event_id,
e.pub_key,
e.created_at,
v.verified_at,
v.failed_at,
v.fail_count
FROM user_verification v
LEFT JOIN "event" e ON e.id = v.event_id
WHERE (v.verified_at < $1 OR v.verified_at IS NULL)
AND (v.failed_at < $1 OR v.failed_at IS NULL)
ORDER BY v.verified_at ASC, v.failed_at ASC
LIMIT 1"#;
sqlx::query_as::<_, VerificationRecord>(query)
.bind(Utc.timestamp_opt(before as i64, 0).unwrap())
.fetch_optional(&self.conn)
.await?
.ok_or(error::Error::SqlxError(RowNotFound))
}
}
/// Create a dynamic SQL query and params from a subscription filter.
fn query_from_filter(f: &ReqFilter) -> Option<QueryBuilder<Postgres>> {
// if the filter is malformed, don't return anything.
if f.force_no_match {
return None;
}
let mut query = QueryBuilder::new("SELECT e.\"content\", e.created_at FROM \"event\" e WHERE ");
let mut push_and = false;
// Query for "authors", allowing prefix matches
if let Some(auth_vec) = &f.authors {
// filter out non-hex values
let auth_vec: Vec<&String> = auth_vec.iter().filter(|a| is_hex(a)).collect();
if !auth_vec.is_empty() {
query.push("(");
// shortcut authors into "IN" query
let any_is_range = auth_vec.iter().any(|pk| pk.len() != 64);
if !any_is_range {
query.push("e.pub_key in (");
let mut pk_sep = query.separated(", ");
for pk in auth_vec.iter() {
pk_sep.push_bind(hex::decode(pk).ok());
}
query.push(") OR e.delegated_by in (");
let mut pk_delegated_sep = query.separated(", ");
for pk in auth_vec.iter() {
pk_delegated_sep.push_bind(hex::decode(pk).ok());
}
query.push(")");
push_and = true;
} else {
let mut range_authors = query.separated(" OR ");
for auth in auth_vec {
match hex_range(auth) {
Some(HexSearch::Exact(ex)) => {
range_authors
.push("(e.pub_key = ")
.push_bind_unseparated(ex.clone())
.push_unseparated(" OR e.delegated_by = ")
.push_bind_unseparated(ex)
.push_unseparated(")");
}
Some(HexSearch::Range(lower, upper)) => {
range_authors
.push("((e.pub_key > ")
.push_bind_unseparated(lower.clone())
.push_unseparated(" AND e.pub_key < ")
.push_bind_unseparated(upper.clone())
.push_unseparated(") OR (e.delegated_by > ")
.push_bind_unseparated(lower)
.push_unseparated(" AND e.delegated_by < ")
.push_bind_unseparated(upper)
.push_unseparated("))");
}
Some(HexSearch::LowerOnly(lower)) => {
range_authors
.push("(e.pub_key > ")
.push_bind_unseparated(lower.clone())
.push_unseparated(" OR e.delegated_by > ")
.push_bind_unseparated(lower)
.push_unseparated(")");
}
None => {
info!("Could not parse hex range from author {:?}", auth);
}
}
push_and = true;
}
}
query.push(")");
}
}
// Query for Kind
if let Some(ks) = &f.kinds {
if !ks.is_empty() {
if push_and {
query.push(" AND ");
}
push_and = true;
query.push("e.kind in (");
let mut list_query = query.separated(", ");
for k in ks.iter() {
list_query.push_bind(*k as i64);
}
query.push(")");
}
}
// Query for event, allowing prefix matches
if let Some(id_vec) = &f.ids {
// filter out non-hex values
let id_vec: Vec<&String> = id_vec.iter().filter(|a| is_hex(a)).collect();
if !id_vec.is_empty() {
if push_and {
query.push(" AND (");
} else {
query.push("(");
}
push_and = true;
// shortcut ids into "IN" query
let any_is_range = id_vec.iter().any(|pk| pk.len() != 64);
if !any_is_range {
query.push("id in (");
let mut sep = query.separated(", ");
for id in id_vec.iter() {
sep.push_bind(hex::decode(id).ok());
}
query.push(")");
} else {
// take each id and convert to a hex search
let mut id_query = query.separated(" OR ");
for id in id_vec {
match hex_range(id) {
Some(HexSearch::Exact(ex)) => {
id_query
.push("(id = ")
.push_bind_unseparated(ex)
.push_unseparated(")");
}
Some(HexSearch::Range(lower, upper)) => {
id_query
.push("(id > ")
.push_bind_unseparated(lower)
.push_unseparated(" AND id < ")
.push_bind_unseparated(upper)
.push_unseparated(")");
}
Some(HexSearch::LowerOnly(lower)) => {
id_query
.push("(id > ")
.push_bind_unseparated(lower)
.push_unseparated(")");
}
None => {
info!("Could not parse hex range from id {:?}", id);
}
}
}
}
query.push(")");
}
}
// Query for tags
if let Some(map) = &f.tags {
if !map.is_empty() {
if push_and {
query.push(" AND ");
}
push_and = true;
for (key, val) in map.iter() {
query.push("e.id IN (SELECT ee.id FROM \"event\" ee LEFT JOIN tag t on ee.id = t.event_id WHERE ee.hidden != 1::bit(1) and (t.\"name\" = ")
.push_bind(key.to_string())
.push(" AND (value in (");
// plain value match first
let mut tag_query = query.separated(", ");
for v in val.iter() {
if (v.len() % 2 != 0) && !is_lower_hex(v) {
tag_query.push_bind(v.as_bytes());
} else {
tag_query.push_bind(hex::decode(v).ok());
}
}
query.push("))))");
}
}
}
// Query for timestamp
if f.since.is_some() {
if push_and {
query.push(" AND ");
}
push_and = true;
query
.push("e.created_at > ")
.push_bind(Utc.timestamp_opt(f.since.unwrap() as i64, 0).unwrap());
}
// Query for timestamp
if f.until.is_some() {
if push_and {
query.push(" AND ");
}
push_and = true;
query
.push("e.created_at < ")
.push_bind(Utc.timestamp_opt(f.until.unwrap() as i64, 0).unwrap());
}
// never display hidden events
if push_and {
query.push(" AND e.hidden != 1::bit(1)");
} else {
query.push("e.hidden != 1::bit(1)");
}
// Apply per-filter limit to this query.
// The use of a LIMIT implies a DESC order, to capture only the most recent events.
if let Some(lim) = f.limit {
query.push(" ORDER BY e.created_at DESC LIMIT ");
query.push(lim.min(1000));
} else {
query.push(" ORDER BY e.created_at ASC LIMIT ");
query.push(1000);
}
Some(query)
}
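To make the generated SQL concrete: for a filter with kinds=[1], a since bound, and limit 20, the builder above emits roughly the query shown in the comment (a sketch; assumes ReqFilter implements Default, otherwise every field must be spelled out):
let f = ReqFilter {
    kinds: Some(vec![1]),
    since: Some(1_675_000_000),
    limit: Some(20),
    ..Default::default()
};
// query_from_filter(&f) builds approximately:
//   SELECT e."content", e.created_at FROM "event" e
//   WHERE e.kind in ($1) AND e.created_at > $2 AND e.hidden != 1::bit(1)
//   ORDER BY e.created_at DESC LIMIT 20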
impl FromRow<'_, PgRow> for VerificationRecord {
fn from_row(row: &'_ PgRow) -> std::result::Result<Self, Error> {
let name =
Nip05Name::try_from(row.get::<'_, &str, &str>("name")).or(Err(RowNotFound))?;
Ok(VerificationRecord {
rowid: row.get::<'_, i64, &str>("id") as u64,
name,
address: hex::encode(row.get::<'_, Vec<u8>, &str>("pub_key")),
event: hex::encode(row.get::<'_, Vec<u8>, &str>("event_id")),
event_created: row.get::<'_, DateTime<Utc>, &str>("created_at").timestamp() as u64,
last_success: None,
last_failure: match row.try_get::<'_, DateTime<Utc>, &str>("failed_at") {
Ok(x) => Some(x.timestamp() as u64),
_ => None,
},
failure_count: row.get::<'_, i32, &str>("fail_count") as u64,
})
}
}

258
src/repo/postgres_migration.rs Normal file
View File

@@ -0,0 +1,258 @@
use crate::repo::postgres::PostgresPool;
use async_trait::async_trait;
use sqlx::{Executor, Postgres, Transaction};
#[async_trait]
pub trait Migration {
fn serial_number(&self) -> i64;
async fn run(&self, tx: &mut Transaction<Postgres>);
}
struct SimpleSqlMigration {
pub serial_number: i64,
pub sql: Vec<&'static str>,
}
#[async_trait]
impl Migration for SimpleSqlMigration {
fn serial_number(&self) -> i64 {
self.serial_number
}
async fn run(&self, tx: &mut Transaction<Postgres>) {
for sql in self.sql.iter() {
tx.execute(*sql).await.unwrap();
}
}
}
/// Execute all migrations on the database.
pub async fn run_migrations(db: &PostgresPool) -> crate::error::Result<usize> {
prepare_migrations_table(db).await;
run_migration(m001::migration(), db).await;
let m002_result = run_migration(m002::migration(), db).await;
if m002_result == MigrationResult::Upgraded {
m002::rebuild_tags(db).await?;
}
run_migration(m003::migration(), db).await;
Ok(current_version(db).await as usize)
}
async fn current_version(db: &PostgresPool) -> i64 {
sqlx::query_scalar("SELECT max(serial_number) FROM migrations;")
.fetch_one(db)
.await
.unwrap()
}
async fn prepare_migrations_table(db: &PostgresPool) {
sqlx::query("CREATE TABLE IF NOT EXISTS migrations (serial_number bigint)")
.execute(db)
.await
.unwrap();
}
// Running a migration was either unnecessary, or completed
#[derive(PartialEq, Eq, Debug, Clone)]
enum MigrationResult {
Upgraded,
NotNeeded,
}
async fn run_migration(migration: impl Migration, db: &PostgresPool) -> MigrationResult {
let row: i64 =
sqlx::query_scalar("SELECT COUNT(*) AS count FROM migrations WHERE serial_number = $1")
.bind(migration.serial_number())
.fetch_one(db)
.await
.unwrap();
if row > 0 {
return MigrationResult::NotNeeded;
}
let mut transaction = db.begin().await.unwrap();
migration.run(&mut transaction).await;
sqlx::query("INSERT INTO migrations VALUES ($1)")
.bind(migration.serial_number())
.execute(&mut transaction)
.await
.unwrap();
transaction.commit().await.unwrap();
MigrationResult::Upgraded
}
mod m001 {
use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
pub const VERSION: i64 = 1;
pub fn migration() -> impl Migration {
SimpleSqlMigration {
serial_number: VERSION,
sql: vec![
r#"
-- Events table
CREATE TABLE "event" (
id bytea NOT NULL,
pub_key bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
kind integer NOT NULL,
"content" bytea NOT NULL,
hidden bit(1) NOT NULL DEFAULT 0::bit(1),
delegated_by bytea NULL,
first_seen timestamp with time zone NOT NULL DEFAULT now(),
CONSTRAINT event_pkey PRIMARY KEY (id)
);
CREATE INDEX event_created_at_idx ON "event" (created_at,kind);
CREATE INDEX event_pub_key_idx ON "event" (pub_key);
CREATE INDEX event_delegated_by_idx ON "event" (delegated_by);
-- Tags table
CREATE TABLE "tag" (
id int8 NOT NULL GENERATED BY DEFAULT AS IDENTITY,
event_id bytea NOT NULL,
"name" varchar NOT NULL,
value bytea NOT NULL,
CONSTRAINT tag_fk FOREIGN KEY (event_id) REFERENCES "event"(id) ON DELETE CASCADE
);
CREATE INDEX tag_event_id_idx ON tag USING btree (event_id, name);
CREATE INDEX tag_value_idx ON tag USING btree (value);
-- NIP-05 Verification table
CREATE TABLE "user_verification" (
id int8 NOT NULL GENERATED BY DEFAULT AS IDENTITY,
event_id bytea NOT NULL,
"name" varchar NOT NULL,
verified_at timestamptz NULL,
failed_at timestamptz NULL,
fail_count int4 NULL DEFAULT 0,
CONSTRAINT user_verification_pk PRIMARY KEY (id),
CONSTRAINT user_verification_fk FOREIGN KEY (event_id) REFERENCES "event"(id) ON DELETE CASCADE
);
CREATE INDEX user_verification_event_id_idx ON user_verification USING btree (event_id);
CREATE INDEX user_verification_name_idx ON user_verification USING btree (name);
"#,
],
}
}
}
mod m002 {
use async_std::stream::StreamExt;
use indicatif::{ProgressBar, ProgressStyle};
use sqlx::Row;
use std::time::Instant;
use tracing::info;
use crate::event::{single_char_tagname, Event};
use crate::repo::postgres::PostgresPool;
use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
use crate::utils::is_lower_hex;
pub const VERSION: i64 = 2;
pub fn migration() -> impl Migration {
SimpleSqlMigration {
serial_number: VERSION,
sql: vec![
r#"
-- Add tag value column
ALTER TABLE tag ADD COLUMN value_hex bytea;
-- Remove not-null constraint
ALTER TABLE tag ALTER COLUMN value DROP NOT NULL;
-- Add value index
CREATE INDEX tag_value_hex_idx ON tag USING btree (value_hex);
"#,
],
}
}
pub async fn rebuild_tags(db: &PostgresPool) -> crate::error::Result<()> {
// Check how many events we have to process
let start = Instant::now();
let mut tx = db.begin().await.unwrap();
let mut update_tx = db.begin().await.unwrap();
// Clear out table
sqlx::query("DELETE FROM tag;")
.execute(&mut update_tx)
.await?;
{
let event_count: i64 = sqlx::query_scalar("SELECT COUNT(*) from event;")
.fetch_one(&mut tx)
.await
.unwrap();
let bar = ProgressBar::new(event_count.try_into().unwrap())
.with_message("rebuilding tags table");
bar.set_style(
ProgressStyle::with_template(
"[{elapsed_precise}] {bar:40.white/blue} {pos:>7}/{len:7} [{percent}%] {msg}",
)
.unwrap(),
);
let mut events =
sqlx::query("SELECT id, content FROM event ORDER BY id;").fetch(&mut tx);
while let Some(row) = events.next().await {
bar.inc(1);
// get the row id and content
let row = row.unwrap();
let event_id: Vec<u8> = row.get(0);
let event_bytes: Vec<u8> = row.get(1);
let event: Event = serde_json::from_str(&String::from_utf8(event_bytes).unwrap())?;
for t in event.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.get(0).unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
}
// safe because len was > 1
let tagval = t.get(1).unwrap();
// insert as BLOB if we can restore it losslessly.
// this means it needs to be even length and lowercase.
if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
let q = "INSERT INTO tag (event_id, \"name\", value_hex) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING;";
sqlx::query(q)
.bind(&event_id)
.bind(tagname)
.bind(hex::decode(tagval).ok())
.execute(&mut update_tx)
.await?;
} else {
let q = "INSERT INTO tag (event_id, \"name\", value) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING;";
sqlx::query(q)
.bind(&event_id)
.bind(tagname)
.bind(tagval.as_bytes())
.execute(&mut update_tx)
.await?;
}
}
}
update_tx.commit().await?;
bar.finish();
}
info!("rebuilt tags in {:?}", start.elapsed());
Ok(())
}
}
mod m003 {
use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
pub const VERSION: i64 = 3;
pub fn migration() -> impl Migration {
SimpleSqlMigration {
serial_number: VERSION,
sql: vec![
r#"
-- Add unique constraint on tag
ALTER TABLE tag ADD CONSTRAINT unique_constraint_name UNIQUE (event_id, "name", value);
"#,
],
}
}
}
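Adding the next schema change follows the same pattern: a new module with a bumped serial number, plus one more run_migration call in run_migrations. A hypothetical m004 (the SQL is illustrative only):
mod m004 {
    use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
    pub const VERSION: i64 = 4;
    pub fn migration() -> impl Migration {
        SimpleSqlMigration {
            serial_number: VERSION,
            sql: vec![
                // hypothetical example statement
                r#"CREATE INDEX tag_name_idx ON tag USING btree ("name");"#,
            ],
        }
    }
}
// and in run_migrations: run_migration(m004::migration(), db).await;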

1039
src/repo/sqlite.rs Normal file

File diff suppressed because it is too large.

View File

@@ -10,24 +10,28 @@ use rusqlite::Connection;
use std::cmp::Ordering;
use std::time::Instant;
use tracing::{debug, error, info};
use indicatif::{ProgressBar, ProgressStyle};
/// Startup DB Pragmas
pub const STARTUP_SQL: &str = r##"
PRAGMA main.synchronous=NORMAL;
PRAGMA main.synchronous = NORMAL;
PRAGMA foreign_keys = ON;
PRAGMA journal_size_limit=32768;
pragma mmap_size = 536870912; -- 512MB of mmap
PRAGMA journal_size_limit = 32768;
PRAGMA temp_store = 2; -- use memory, not temp files
PRAGMA main.cache_size = 20000; -- 80MB max cache size per conn
pragma mmap_size = 17179869184; -- cap mmap at 16GB
"##;
/// Latest database version
pub const DB_VERSION: usize = 9;
pub const DB_VERSION: usize = 16;
/// Schema definition
const INIT_SQL: &str = formatcp!(
r##"
-- Database settings
PRAGMA encoding = "UTF-8";
PRAGMA journal_mode=WAL;
PRAGMA journal_mode = WAL;
PRAGMA auto_vacuum = FULL;
PRAGMA main.synchronous=NORMAL;
PRAGMA foreign_keys = ON;
PRAGMA application_id = 1654008667;
@@ -49,24 +53,34 @@ content TEXT NOT NULL -- serialized json of event object
-- Event Indexes
CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash);
CREATE INDEX IF NOT EXISTS author_index ON event(author);
CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
CREATE INDEX IF NOT EXISTS kind_author_index ON event(kind,author);
CREATE INDEX IF NOT EXISTS kind_created_at_index ON event(kind,created_at);
CREATE INDEX IF NOT EXISTS author_created_at_index ON event(author,created_at);
CREATE INDEX IF NOT EXISTS author_kind_index ON event(author,kind);
-- Tag Table
-- Tag values are stored as either a BLOB (if they come in as a
-- hex-string), or TEXT otherwise.
-- This means that searches need to select the appropriate column.
-- We duplicate the kind/created_at to make indexes much more efficient.
CREATE TABLE IF NOT EXISTS tag (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
name TEXT, -- the tag name ("p", "e", whatever)
value TEXT, -- the tag value, if not hex.
value_hex BLOB, -- the tag value, if it can be interpreted as a lowercase hex string.
created_at INTEGER NOT NULL, -- when the event was authored
kind INTEGER NOT NULL, -- event kind
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
CREATE INDEX IF NOT EXISTS tag_val_hex_index ON tag(value_hex);
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value);
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value);
CREATE INDEX IF NOT EXISTS tag_covering_index ON tag(name,kind,value,created_at,event_id);
-- NIP-05 User Validation
CREATE TABLE IF NOT EXISTS user_verification (
@@ -91,7 +105,21 @@ pub fn curr_db_version(conn: &mut Connection) -> Result<usize> {
Ok(curr_version)
}
fn mig_init(conn: &mut PooledConnection) -> Result<usize> {
/// Determine event count
pub fn db_event_count(conn: &mut Connection) -> Result<usize> {
let query = "SELECT count(*) FROM event;";
let count = conn.query_row(query, [], |row| row.get(0))?;
Ok(count)
}
/// Determine tag count
pub fn db_tag_count(conn: &mut Connection) -> Result<usize> {
let query = "SELECT count(*) FROM tag;";
let count = conn.query_row(query, [], |row| row.get(0))?;
Ok(count)
}
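A short sketch of how these counters might be used, e.g. when logging at startup (the surrounding connection setup is illustrative):

// `conn` is a rusqlite::Connection opened on the relay database
let event_total = db_event_count(&mut conn)?;
let tag_total = db_tag_count(&mut conn)?;
info!("database holds {} events and {} tag rows", event_total, tag_total);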
fn mig_init(conn: &mut PooledConnection) -> usize {
match conn.execute_batch(INIT_SQL) {
Ok(()) => {
info!(
@@ -104,11 +132,11 @@ fn mig_init(conn: &mut PooledConnection) -> Result<usize> {
panic!("database could not be initialized");
}
}
Ok(DB_VERSION)
DB_VERSION
}
/// Upgrade DB to latest version, and execute pragma settings
pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
pub fn upgrade_db(conn: &mut PooledConnection) -> Result<usize> {
// check the version.
let mut curr_version = curr_db_version(conn)?;
info!("DB version = {:?}", curr_version);
@@ -119,11 +147,11 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
);
debug!(
"SQLite max table/blob/text length: {} MB",
(conn.limit(Limit::SQLITE_LIMIT_LENGTH) as f64 / (1024 * 1024) as f64).floor()
(f64::from(conn.limit(Limit::SQLITE_LIMIT_LENGTH)) / f64::from(1024 * 1024)).floor()
);
debug!(
"SQLite max SQL length: {} MB",
(conn.limit(Limit::SQLITE_LIMIT_SQL_LENGTH) as f64 / (1024 * 1024) as f64).floor()
(f64::from(conn.limit(Limit::SQLITE_LIMIT_SQL_LENGTH)) / f64::from(1024 * 1024)).floor()
);
match curr_version.cmp(&DB_VERSION) {
@@ -131,26 +159,22 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
Ordering::Less => {
// initialize from scratch
if curr_version == 0 {
curr_version = mig_init(conn)?;
curr_version = mig_init(conn);
}
// for initialized but out-of-date schemas, proceed to
// upgrade sequentially until we are current.
if curr_version == 1 {
curr_version = mig_1_to_2(conn)?;
}
if curr_version == 2 {
curr_version = mig_2_to_3(conn)?;
}
if curr_version == 3 {
curr_version = mig_3_to_4(conn)?;
}
if curr_version == 4 {
curr_version = mig_4_to_5(conn)?;
}
if curr_version == 5 {
curr_version = mig_5_to_6(conn)?;
}
@@ -163,6 +187,27 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
if curr_version == 8 {
curr_version = mig_8_to_9(conn)?;
}
if curr_version == 9 {
curr_version = mig_9_to_10(conn)?;
}
if curr_version == 10 {
curr_version = mig_10_to_11(conn)?;
}
if curr_version == 11 {
curr_version = mig_11_to_12(conn)?;
}
if curr_version == 12 {
curr_version = mig_12_to_13(conn)?;
}
if curr_version == 13 {
curr_version = mig_13_to_14(conn)?;
}
if curr_version == 14 {
curr_version = mig_14_to_15(conn)?;
}
if curr_version == 15 {
curr_version = mig_15_to_16(conn)?;
}
if curr_version == DB_VERSION {
info!(
@@ -173,13 +218,12 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
}
// Database is current, all is good
Ordering::Equal => {
debug!("Database version was already current (v{})", DB_VERSION);
debug!("Database version was already current (v{DB_VERSION})");
}
// Database is newer than what this code understands, abort
Ordering::Greater => {
panic!(
"Database version is newer than supported by this executable (v{} > v{})",
curr_version, DB_VERSION
"Database version is newer than supported by this executable (v{curr_version} > v{DB_VERSION})",
);
}
}
@@ -187,9 +231,65 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
// Setup PRAGMA
conn.execute_batch(STARTUP_SQL)?;
debug!("SQLite PRAGMA startup completed");
Ok(DB_VERSION)
}
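Each migration in the chain above follows the same shape: log the pending step, run an SQL batch that finishes by bumping user_version, and return the new version so control falls through to the next check. A hypothetical mig_16_to_17 sketching that pattern (the SQL body is a placeholder, not a real schema change):

fn mig_16_to_17(conn: &mut PooledConnection) -> Result<usize> {
    info!("database schema needs update from 16->17");
    let upgrade_sql = r##"
-- a real schema change would go here
PRAGMA user_version = 17;
"##;
    conn.execute_batch(upgrade_sql)?;
    info!("database schema upgraded v16 -> v17");
    Ok(17)
}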
pub fn rebuild_tags(conn: &mut PooledConnection) -> Result<()> {
// Check how many events we have to process
let count = db_event_count(conn)?;
let update_each_percent = 0.05;
let mut percent_done = 0.0;
let mut events_processed = 0;
let start = Instant::now();
let tx = conn.transaction()?;
{
// Clear out table
tx.execute("DELETE FROM tag;", [])?;
let mut stmt = tx.prepare("select id, content from event order by id;")?;
let mut tag_rows = stmt.query([])?;
while let Some(row) = tag_rows.next()? {
if (events_processed as f32)/(count as f32) > percent_done {
info!("Tag update {}% complete...", (100.0*percent_done).round());
percent_done += update_each_percent;
}
// we want to capture the event_id that had the tag, the tag name, and the tag value.
let event_id: u64 = row.get(0)?;
let event_json: String = row.get(1)?;
let event: Event = serde_json::from_str(&event_json)?;
// look at each event, and each tag, creating new tag entries if appropriate.
for t in event.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.get(0).unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
}
// safe because len was > 1
let tagval = t.get(1).unwrap();
// insert as BLOB if we can restore it losslessly.
// this means it needs to be even length and lowercase.
if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
tx.execute(
"INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
params![event_id, tagname, hex::decode(tagval).ok()],
)?;
} else {
// otherwise, insert as text
tx.execute(
"INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
params![event_id, tagname, &tagval],
)?;
}
}
events_processed += 1;
}
}
tx.commit()?;
info!("rebuilt tags in {:?}", start.elapsed());
Ok(())
}
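The BLOB-vs-TEXT decision above turns on whether a value survives a hex round trip; factored out as a predicate, the check looks roughly like this (the helper name is hypothetical; is_lower_hex and hex::decode are the ones used above):

fn tag_value_as_blob(tagval: &str) -> Option<Vec<u8>> {
    // only an even-length, lowercase hex string decodes and re-encodes losslessly
    if tagval.len() % 2 == 0 && is_lower_hex(tagval) {
        hex::decode(tagval).ok()
    } else {
        None
    }
}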
//// Migration Scripts
fn mig_1_to_2(conn: &mut PooledConnection) -> Result<usize> {
@@ -321,7 +421,6 @@ fn mig_5_to_6(conn: &mut PooledConnection) -> Result<usize> {
let mut stmt = tx.prepare("select id, content from event order by id;")?;
let mut tag_rows = stmt.query([])?;
while let Some(row) = tag_rows.next()? {
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
let event_id: u64 = row.get(0)?;
let event_json: String = row.get(1)?;
let event: Event = serde_json::from_str(&event_json)?;
@@ -363,7 +462,6 @@ fn mig_5_to_6(conn: &mut PooledConnection) -> Result<usize> {
fn mig_6_to_7(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 6->7");
// only change is adding a delegated_by column to events.
let upgrade_sql = r##"
ALTER TABLE event ADD delegated_by BLOB;
CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
@@ -419,5 +517,215 @@ PRAGMA user_version = 9;
panic!("database could not be upgraded");
}
}
Ok(8)
Ok(9)
}
fn mig_9_to_10(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 9->10");
// Those old indexes were actually helpful...
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value_hex,value);
PRAGMA user_version = 10;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v9 -> v10");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(10)
}
fn mig_10_to_11(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 10->11");
// Those old indexes were actually helpful...
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value_hex);
reindex;
pragma optimize;
PRAGMA user_version = 11;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v10 -> v11");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(11)
}
fn mig_11_to_12(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 11->12");
let start = Instant::now();
let tx = conn.transaction()?;
{
// Lookup every replaceable event
let mut stmt = tx.prepare("select kind,author from event where kind in (0,3,41) or (kind>=10000 and kind<20000) order by id;")?;
let mut replaceable_rows = stmt.query([])?;
info!("updating replaceable events; this could take awhile...");
while let Some(row) = replaceable_rows.next()? {
// capture the kind and author of each replaceable event.
let event_kind: u64 = row.get(0)?;
let event_author: Vec<u8> = row.get(1)?;
tx.execute(
"UPDATE event SET hidden=TRUE WHERE hidden!=TRUE and kind=? and author=? and id NOT IN (SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1)",
params![event_kind, event_author, event_kind, event_author],
)?;
}
tx.execute("PRAGMA user_version = 12;", [])?;
}
tx.commit()?;
info!("database schema upgraded v11 -> v12 in {:?}", start.elapsed());
// vacuum after large table modification
let start = Instant::now();
conn.execute("VACUUM;", [])?;
info!("vacuumed DB after hidden event cleanup in {:?}", start.elapsed());
Ok(12)
}
fn mig_12_to_13(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 12->13");
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS kind_author_index ON event(kind,author);
reindex;
pragma optimize;
PRAGMA user_version = 13;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v12 -> v13");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(13)
}
fn mig_13_to_14(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 13->14");
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
CREATE INDEX IF NOT EXISTS kind_created_at_index ON event(kind,created_at);
pragma optimize;
PRAGMA user_version = 14;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v13 -> v14");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(14)
}
fn mig_14_to_15(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 14->15");
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS author_created_at_index ON event(author,created_at);
CREATE INDEX IF NOT EXISTS author_kind_index ON event(author,kind);
PRAGMA user_version = 15;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v14 -> v15");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
// clear out hidden events
let clear_hidden_sql = r##"DELETE FROM event WHERE HIDDEN=true;"##;
info!("removing hidden events; this may take awhile...");
match conn.execute_batch(clear_hidden_sql) {
Ok(()) => {
info!("all hidden events removed");
},
Err(err) => {
error!("delete failed: {}", err);
panic!("could not remove hidden events");
}
}
Ok(15)
}
fn mig_15_to_16(conn: &mut PooledConnection) -> Result<usize> {
let count = db_event_count(conn)?;
info!("database schema needs update from 15->16 (this make take a few minutes)");
let upgrade_sql = r##"
DROP TABLE tag;
CREATE TABLE tag (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
name TEXT, -- the tag name ("p", "e", whatever)
value TEXT, -- the tag value, if not hex.
created_at INTEGER NOT NULL, -- when the event was authored
kind INTEGER NOT NULL, -- event kind
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value);
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value);
CREATE INDEX IF NOT EXISTS tag_covering_index ON tag(name,kind,value,created_at,event_id);
"##;
let start = Instant::now();
let tx = conn.transaction()?;
let bar = ProgressBar::new(count.try_into().unwrap())
.with_message("rebuilding tags table");
bar.set_style(
ProgressStyle::with_template(
"[{elapsed_precise}] {bar:40.white/blue} {pos:>7}/{len:7} [{percent}%] {msg}",
)
.unwrap(),
);
{
tx.execute_batch(upgrade_sql)?;
let mut stmt = tx.prepare("select id, kind, created_at, content from event order by id;")?;
let mut tag_rows = stmt.query([])?;
let mut count = 0;
while let Some(row) = tag_rows.next()? {
count += 1;
if count%10==0 {
bar.inc(10);
}
let event_id: u64 = row.get(0)?;
let kind: u64 = row.get(1)?;
let created_at: u64 = row.get(2)?;
let event_json: String = row.get(3)?;
let event: Event = serde_json::from_str(&event_json)?;
// look at each event, and each tag, creating new tag entries if appropriate.
for t in event.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.get(0).unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
}
// safe because len was > 1
let tagval = t.get(1).unwrap();
// otherwise, insert as text
tx.execute(
"INSERT INTO tag (event_id, name, value, kind, created_at) VALUES (?1, ?2, ?3, ?4, ?5);",
params![event_id, tagname, &tagval, kind, created_at],
)?;
}
}
tx.execute("PRAGMA user_version = 16;", [])?;
}
bar.finish();
tx.commit()?;
info!("database schema upgraded v15 -> v16 in {:?}", start.elapsed());
Ok(16)
}
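Because kind and created_at are now duplicated into tag, a combined tag/kind/time filter can be answered from tag_covering_index alone, without touching the event table; a hedged example of such a lookup (literal values illustrative):

SELECT event_id FROM tag
WHERE name = 'p'
  AND kind = 1
  AND value = '3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d'
  AND created_at >= 1670000000;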


@@ -3,6 +3,7 @@ use crate::close::Close;
use crate::close::CloseCmd;
use crate::config::{Settings, VerifiedUsersMode};
use crate::conn;
use crate::repo::NostrRepo;
use crate::db;
use crate::db::SubmittedEvent;
use crate::error::{Error, Result};
@@ -12,8 +13,12 @@ use crate::info::RelayInfo;
use crate::nip05;
use crate::notice::Notice;
use crate::subscription::Subscription;
use prometheus::IntCounterVec;
use prometheus::IntGauge;
use prometheus::{Encoder, Histogram, IntCounter, HistogramOpts, Opts, Registry, TextEncoder};
use futures::SinkExt;
use futures::StreamExt;
use governor::{Jitter, Quota, RateLimiter};
use http::header::HeaderMap;
use hyper::header::ACCEPT;
use hyper::service::{make_service_fn, service_fn};
@@ -27,6 +32,8 @@ use std::collections::HashMap;
use std::convert::Infallible;
use std::net::SocketAddr;
use std::path::Path;
use std::sync::Arc;
use std::sync::atomic::Ordering;
use std::sync::mpsc::Receiver as MpscReceiver;
use std::time::Duration;
use std::time::Instant;
@@ -35,22 +42,25 @@ use tokio::sync::broadcast::{self, Receiver, Sender};
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio_tungstenite::WebSocketStream;
use tracing::*;
use tracing::{debug, error, info, trace, warn};
use tungstenite::error::CapacityError::MessageTooLong;
use tungstenite::error::Error as WsError;
use tungstenite::handshake;
use tungstenite::protocol::Message;
use tungstenite::protocol::WebSocketConfig;
/// Handle arbitrary HTTP requests, including for WebSocket upgrades.
/// Handle arbitrary HTTP requests, including for `WebSocket` upgrades.
#[allow(clippy::too_many_arguments)]
async fn handle_web_request(
mut request: Request<Body>,
pool: db::SqlitePool,
repo: Arc<dyn NostrRepo>,
settings: Settings,
remote_addr: SocketAddr,
broadcast: Sender<Event>,
event_tx: tokio::sync::mpsc::Sender<SubmittedEvent>,
shutdown: Receiver<()>,
registry: Registry,
metrics: NostrMetrics,
) -> Result<Response<Body>, Infallible> {
match (
request.uri().path(),
@@ -73,6 +83,7 @@ async fn handle_web_request(
Ok(upgraded) => {
// set WebSocket configuration options
let config = WebSocketConfig {
max_send_queue: Some(1024),
max_message_size: settings.limits.max_ws_message_bytes,
max_frame_size: settings.limits.max_ws_frame_bytes,
..Default::default()
@@ -85,7 +96,8 @@ async fn handle_web_request(
tokio_tungstenite::tungstenite::protocol::Role::Server,
Some(config),
)
.await;
.await;
let origin = get_header_string("origin", request.headers());
let user_agent = get_header_string("user-agent", request.headers());
// determine the remote IP from headers if they exist
let header_ip = settings
@@ -99,24 +111,25 @@ async fn handle_web_request(
let client_info = ClientInfo {
remote_ip,
user_agent,
origin,
};
// spawn a nostr server with our websocket
tokio::spawn(nostr_server(
pool,
repo,
client_info,
settings,
ws_stream,
broadcast,
event_tx,
shutdown,
metrics,
));
}
// todo: trace, don't print...
Err(e) => println!(
"error when trying to upgrade connection \
from address {} to websocket connection. \
Error is: {}",
remote_addr, e
from address {remote_addr} to websocket connection. \
Error is: {e}",
),
}
});
@@ -126,7 +139,7 @@ async fn handle_web_request(
Err(error) => {
warn!("websocket response failed");
let mut res =
Response::new(Body::from(format!("Failed to create websocket: {}", error)));
Response::new(Body::from(format!("Failed to create websocket: {error}")));
*res.status_mut() = StatusCode::BAD_REQUEST;
return Ok(res);
}
@@ -147,26 +160,38 @@ async fn handle_web_request(
let rinfo = RelayInfo::from(settings.info);
let b = Body::from(serde_json::to_string_pretty(&rinfo).unwrap());
return Ok(Response::builder()
.status(200)
.header("Content-Type", "application/nostr+json")
.header("Access-Control-Allow-Origin", "*")
.body(b)
.unwrap());
.status(200)
.header("Content-Type", "application/nostr+json")
.header("Access-Control-Allow-Origin", "*")
.body(b)
.unwrap());
}
}
}
Ok(Response::builder()
.status(200)
.header("Content-Type", "text/plain")
.body(Body::from("Please use a Nostr client to connect."))
.unwrap())
.status(200)
.header("Content-Type", "text/plain")
.body(Body::from("Please use a Nostr client to connect."))
.unwrap())
}
("/metrics", false) => {
let mut buffer = vec![];
let encoder = TextEncoder::new();
let metric_families = registry.gather();
encoder.encode(&metric_families, &mut buffer).unwrap();
Ok(Response::builder()
.status(StatusCode::OK)
.header("Content-Type", "text/plain")
.body(Body::from(buffer))
.unwrap())
}
(_, _) => {
//handle any other url
Ok(Response::builder()
.status(StatusCode::NOT_FOUND)
.body(Body::from("Nothing here."))
.unwrap())
.status(StatusCode::NOT_FOUND)
.body(Body::from("Nothing here."))
.unwrap())
}
}
}
@@ -174,7 +199,7 @@ async fn handle_web_request(
fn get_header_string(header: &str, headers: &HeaderMap) -> Option<String> {
headers
.get(header)
.and_then(|x| x.to_str().ok().map(|x| x.to_string()))
.and_then(|x| x.to_str().ok().map(std::string::ToString::to_string))
}
// return on a control-c or internally requested shutdown signal
@@ -184,24 +209,98 @@ async fn ctrl_c_or_signal(mut shutdown_signal: Receiver<()>) {
loop {
tokio::select! {
_ = shutdown_signal.recv() => {
info!("Shutting down webserver as requested");
// server shutting down, exit loop
break;
},
_ = tokio::signal::ctrl_c() => {
info!("Shutting down webserver due to SIGINT");
break;
info!("Shutting down webserver as requested");
// server shutting down, exit loop
break;
},
_ = tokio::signal::ctrl_c() => {
info!("Shutting down webserver due to SIGINT");
break;
},
_ = term_signal.recv() => {
info!("Shutting down webserver due to SIGTERM");
break;
},
_ = term_signal.recv() => {
info!("Shutting down webserver due to SIGTERM");
break;
},
}
}
}
fn create_metrics() -> (Registry, NostrMetrics) {
// setup prometheus registry
let registry = Registry::new();
let query_sub = Histogram::with_opts(HistogramOpts::new(
"nostr_query_seconds",
"Subscription response times",
)).unwrap();
let query_db = Histogram::with_opts(HistogramOpts::new(
"nostr_filter_seconds",
"Filter SQL query times",
)).unwrap();
let write_events = Histogram::with_opts(HistogramOpts::new(
"nostr_events_write_seconds",
"Event writing response times",
)).unwrap();
let sent_events = IntCounterVec::new(
Opts::new("nostr_events_sent_total", "Events sent to clients"),
vec!["source"].as_slice(),
).unwrap();
let connections = IntCounter::with_opts(Opts::new(
"nostr_connections_total",
"New connections",
)).unwrap();
let db_connections = IntGauge::with_opts(Opts::new(
"nostr_db_connections", "Active database connections"
)).unwrap();
let query_aborts = IntCounterVec::new(
Opts::new("nostr_query_abort_total", "Aborted queries"),
vec!["reason"].as_slice(),
).unwrap();
let cmd_req = IntCounter::with_opts(Opts::new(
"nostr_cmd_req_total",
"REQ commands",
)).unwrap();
let cmd_event = IntCounter::with_opts(Opts::new(
"nostr_cmd_event_total",
"EVENT commands",
)).unwrap();
let cmd_close = IntCounter::with_opts(Opts::new(
"nostr_cmd_close_total",
"CLOSE commands",
)).unwrap();
let disconnects = IntCounterVec::new(
Opts::new("nostr_disconnects_total", "Client disconnects"),
vec!["reason"].as_slice(),
).unwrap();
registry.register(Box::new(query_sub.clone())).unwrap();
registry.register(Box::new(query_db.clone())).unwrap();
registry.register(Box::new(write_events.clone())).unwrap();
registry.register(Box::new(sent_events.clone())).unwrap();
registry.register(Box::new(connections.clone())).unwrap();
registry.register(Box::new(db_connections.clone())).unwrap();
registry.register(Box::new(query_aborts.clone())).unwrap();
registry.register(Box::new(cmd_req.clone())).unwrap();
registry.register(Box::new(cmd_event.clone())).unwrap();
registry.register(Box::new(cmd_close.clone())).unwrap();
registry.register(Box::new(disconnects.clone())).unwrap();
let metrics = NostrMetrics {
query_sub,
query_db,
write_events,
sent_events,
connections,
db_connections,
disconnects,
query_aborts,
cmd_req,
cmd_event,
cmd_close,
};
(registry,metrics)
}
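All of these collectors back the /metrics route handled earlier, exposed in the Prometheus text format; a typical manual scrape (listen address illustrative):

curl http://127.0.0.1:8080/metrics

The response includes counters such as nostr_connections_total alongside histogram buckets for nostr_query_seconds.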
/// Start running a Nostr relay server.
pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result<(), Error> {
pub fn start_server(settings: &Settings, shutdown_rx: MpscReceiver<()>) -> Result<(), Error> {
trace!("Config: {:?}", settings);
// do some config validation.
if !Path::new(&settings.database.data_directory).is_dir() {
@@ -243,7 +342,20 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
// configure tokio runtime
let rt = Builder::new_multi_thread()
.enable_all()
.thread_name("tokio-ws")
.thread_name_fn(|| {
// give each thread a unique numeric name
static ATOMIC_ID: std::sync::atomic::AtomicUsize = std::sync::atomic::AtomicUsize::new(0);
let id = ATOMIC_ID.fetch_add(1,Ordering::SeqCst);
format!("tokio-ws-{id}")
})
// limit concurrent SQLite blocking threads
.max_blocking_threads(settings.limits.max_blocking_threads)
.on_thread_start(|| {
trace!("started new thread: {:?}", std::thread::current().name());
})
.on_thread_stop(|| {
trace!("stopped thread: {:?}", std::thread::current().name());
})
.build()
.unwrap();
// start tokio
@@ -251,8 +363,6 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
let broadcast_buffer_limit = settings.limits.broadcast_buffer;
let persist_buffer_limit = settings.limits.event_persist_buffer;
let verified_users_active = settings.verified_users.is_active();
let db_min_conn = settings.database.min_conn;
let db_max_conn = settings.database.max_conn;
let settings = settings.clone();
info!("listening on: {}", socket_addr);
// all client-submitted valid events are broadcast to every
@@ -275,23 +385,28 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
// overwhelming this will drop events and won't register
// metadata events.
let (metadata_tx, metadata_rx) = broadcast::channel::<Event>(4096);
// start the database writer thread. Give it a channel for
let (registry, metrics) = create_metrics();
// build a repository for events
let repo = db::build_repo(&settings, metrics.clone()).await;
// start the database writer task. Give it a channel for
// writing events, and for publishing events that have been
// written (to all connected clients).
db::db_writer(
settings.clone(),
event_rx,
bcast_tx.clone(),
metadata_tx.clone(),
shutdown_listen,
)
.await;
tokio::task::spawn(
db::db_writer(
repo.clone(),
settings.clone(),
event_rx,
bcast_tx.clone(),
metadata_tx.clone(),
shutdown_listen,
));
info!("db writer created");
// create a nip-05 verifier thread, if enabled.
if settings.verified_users.mode != VerifiedUsersMode::Disabled {
let verifier_opt =
nip05::Verifier::new(metadata_rx, bcast_tx.clone(), settings.clone());
nip05::Verifier::new(repo.clone(), metadata_rx, bcast_tx.clone(), settings.clone());
if let Ok(mut v) = verifier_opt {
if verified_users_active {
tokio::task::spawn(async move {
@@ -301,6 +416,7 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
}
}
}
// listen for (external to tokio) shutdown request
let controlled_shutdown = invoke_shutdown.clone();
tokio::spawn(async move {
@@ -309,10 +425,9 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
Ok(()) => {
info!("control message requesting shutdown");
controlled_shutdown.send(()).ok();
}
},
Err(std::sync::mpsc::RecvError) => {
// FIXME: spurious error on startup?
debug!("shutdown requestor is disconnected");
trace!("shutdown requestor is disconnected (this is normal)");
}
};
});
@@ -326,36 +441,34 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
info!("shutting down due to SIGINT (main)");
ctrl_c_shutdown.send(()).ok();
});
// build a connection pool for sqlite connections
let pool = db::build_pool(
"client query",
&settings,
rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY
| rusqlite::OpenFlags::SQLITE_OPEN_SHARED_CACHE,
db_min_conn,
db_max_conn,
true,
);
// spawn a task to check the pool size.
//let pool_monitor = pool.clone();
//tokio::spawn(async move {db::monitor_pool("reader", pool_monitor).await;});
// A `Service` is needed for every connection, so this
// creates one from our `handle_request` function.
let make_svc = make_service_fn(|conn: &AddrStream| {
let svc_pool = pool.clone();
let repo = repo.clone();
let remote_addr = conn.remote_addr();
let bcast = bcast_tx.clone();
let event = event_tx.clone();
let stop = invoke_shutdown.clone();
let settings = settings.clone();
let registry = registry.clone();
let metrics = metrics.clone();
async move {
// service_fn converts our function into a `Service`
Ok::<_, Infallible>(service_fn(move |request: Request<Body>| {
handle_web_request(
request,
svc_pool.clone(),
repo.clone(),
settings.clone(),
remote_addr,
bcast.clone(),
event.clone(),
stop.subscribe(),
registry.clone(),
metrics.clone(),
)
}))
}
@@ -365,7 +478,7 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
.with_graceful_shutdown(ctrl_c_or_signal(webserver_shutdown_listen));
// run hyper in this thread. This is why the thread does not return.
if let Err(e) = server.await {
eprintln!("server error: {}", e);
eprintln!("server error: {e}");
}
});
Ok(())
@@ -383,11 +496,15 @@ pub enum NostrMessage {
CloseMsg(CloseCmd),
}
/// Convert Message to NostrMessage
fn convert_to_msg(msg: String, max_bytes: Option<usize>) -> Result<NostrMessage> {
let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
/// Convert Message to `NostrMessage`
fn convert_to_msg(msg: &str, max_bytes: Option<usize>) -> Result<NostrMessage> {
let parsed_res: Result<NostrMessage> = serde_json::from_str(msg).map_err(std::convert::Into::into);
match parsed_res {
Ok(m) => {
if let NostrMessage::SubMsg(_) = m {
// note: this only prints the first 16k of a REQ and then truncates.
trace!("REQ: {:?}",msg);
};
if let NostrMessage::EventMsg(_) = m {
if let Some(max_size) = max_bytes {
// check length, ensure that some max size is set.
@@ -399,15 +516,15 @@ fn convert_to_msg(msg: String, max_bytes: Option<usize>) -> Result<NostrMessage>
Ok(m)
}
Err(e) => {
debug!("proto parse error: {:?}", e);
debug!("parse error on message: {}", msg.trim());
trace!("proto parse error: {:?}", e);
trace!("parse error on message: {:?}", msg.trim());
Err(Error::ProtoParseError)
}
}
}
/// Turn a string into a NOTICE message ready to send over a WebSocket
fn make_notice_message(notice: Notice) -> Message {
/// Turn a string into a NOTICE message ready to send over a `WebSocket`
fn make_notice_message(notice: &Notice) -> Message {
let json = match notice {
Notice::Message(ref msg) => json!(["NOTICE", msg]),
Notice::EventResult(ref res) => json!(["OK", res.id, res.status.to_bool(), res.msg]),
@@ -419,18 +536,21 @@ fn make_notice_message(notice: Notice) -> Message {
struct ClientInfo {
remote_ip: String,
user_agent: Option<String>,
origin: Option<String>,
}
/// Handle new client connections. This runs through an event loop
/// for all client communication.
#[allow(clippy::too_many_arguments)]
async fn nostr_server(
pool: db::SqlitePool,
repo: Arc<dyn NostrRepo>,
client_info: ClientInfo,
settings: Settings,
mut ws_stream: WebSocketStream<Upgraded>,
broadcast: Sender<Event>,
event_tx: mpsc::Sender<SubmittedEvent>,
mut shutdown: Receiver<()>,
metrics: NostrMetrics,
) {
// the time this websocket nostr server started
let orig_start = Instant::now();
@@ -438,13 +558,28 @@ async fn nostr_server(
let mut bcast_rx = broadcast.subscribe();
// Track internal client state
let mut conn = conn::ClientConn::new(client_info.remote_ip);
// subscription creation rate limiting
let mut sub_lim_opt = None;
// 100ms jitter when the rate limiter returns
let jitter = Jitter::up_to(Duration::from_millis(100));
let sub_per_min_setting = settings.limits.subscriptions_per_min;
if let Some(sub_per_min) = sub_per_min_setting {
if sub_per_min > 0 {
trace!("Rate limits for sub creation ({}/min)", sub_per_min);
let quota_time = core::num::NonZeroU32::new(sub_per_min).unwrap();
let quota = Quota::per_minute(quota_time);
sub_lim_opt = Some(RateLimiter::direct(quota));
}
}
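// (illustrative note) governor's direct RateLimiter awaits in
// until_ready_with_jitter once the per-minute quota is spent, so bursts
// of REQ creation are delayed (plus up to 100ms of jitter) rather than
// rejected outright.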
// Use the remote IP as the client identifier
let cid = conn.get_client_prefix();
// Create a channel for receiving query results from the database.
// we will send out the tx handle to any query we generate.
let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(256);
// this has capacity for some of the larger requests we see, which
// should allow the DB thread to release the handle earlier.
let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(20_000);
// Create channel for receiving NOTICEs
let (notice_tx, mut notice_rx) = mpsc::channel::<Notice>(32);
let (notice_tx, mut notice_rx) = mpsc::channel::<Notice>(128);
// last time this client sent data (message, ping, etc.)
let mut last_message_time = Instant::now();
@@ -462,20 +597,30 @@ async fn nostr_server(
// when these subscriptions are cancelled, make a message
// available to the executing query so it knows to stop.
let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
// keep track of the subscriptions we have
let mut current_subs: Vec<Subscription> = Vec::new();
// for stats, keep track of how many events the client published,
// and how many it received from queries.
let mut client_published_event_count: usize = 0;
let mut client_received_event_count: usize = 0;
debug!("new client connection (cid: {}, ip: {:?})", cid, conn.ip());
if let Some(ua) = client_info.user_agent {
debug!("cid: {}, user-agent: {:?}", cid, ua);
}
let unspec = "<unspecified>".to_string();
info!("new client connection (cid: {}, ip: {:?})", cid, conn.ip());
let origin = client_info.origin.as_ref().unwrap_or_else(|| &unspec);
let user_agent = client_info
.user_agent.as_ref()
.unwrap_or_else(|| &unspec);
info!(
"cid: {}, origin: {:?}, user-agent: {:?}",
cid, origin, user_agent
);
// Measure connections
metrics.connections.inc();
loop {
tokio::select! {
_ = shutdown.recv() => {
info!("Close connection down due to shutdown, client: {}, ip: {:?}, connected: {:?}", cid, conn.ip(), orig_start.elapsed());
metrics.disconnects.with_label_values(&["shutdown"]).inc();
info!("Close connection down due to shutdown, client: {}, ip: {:?}, connected: {:?}", cid, conn.ip(), orig_start.elapsed());
// server shutting down, exit loop
break;
},
@@ -484,22 +629,24 @@ async fn nostr_server(
// if it has been too long, disconnect
if last_message_time.elapsed() > max_quiet_time {
debug!("ending connection due to lack of client ping response");
metrics.disconnects.with_label_values(&["timeout"]).inc();
break;
}
// Send a ping
ws_stream.send(Message::Ping(Vec::new())).await.ok();
},
Some(notice_msg) = notice_rx.recv() => {
ws_stream.send(make_notice_message(notice_msg)).await.ok();
ws_stream.send(make_notice_message(&notice_msg)).await.ok();
},
Some(query_result) = query_rx.recv() => {
// database informed us of a query result we asked for
let subesc = query_result.sub_id.replace('"', "");
if query_result.event == "EOSE" {
let send_str = format!("[\"EOSE\",\"{}\"]", subesc);
let send_str = format!("[\"EOSE\",\"{subesc}\"]");
ws_stream.send(Message::Text(send_str)).await.ok();
} else {
client_received_event_count += 1;
metrics.sent_events.with_label_values(&["db"]).inc();
// send a result
let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
ws_stream.send(Message::Text(send_str)).await.ok();
@@ -513,16 +660,16 @@ async fn nostr_server(
if !sub.interested_in_event(&global_event) {
continue;
}
// TODO: serialize at broadcast time, instead of
// once for each consumer.
if let Ok(event_str) = serde_json::to_string(&global_event) {
debug!("sub match for client: {}, sub: {:?}, event: {:?}",
trace!("sub match for client: {}, sub: {:?}, event: {:?}",
cid, s,
global_event.get_event_id_prefix());
// create an event response and send it
let subesc = s.replace('"', "");
ws_stream.send(Message::Text(format!("[\"EVENT\",\"{}\",{}]", subesc, event_str))).await.ok();
metrics.sent_events.with_label_values(&["realtime"]).inc();
ws_stream.send(Message::Text(format!("[\"EVENT\",\"{subesc}\",{event_str}]"))).await.ok();
} else {
warn!("could not serialize event: {:?}", global_event.get_event_id_prefix());
}
@@ -534,11 +681,11 @@ async fn nostr_server(
// Consume text messages from the client, parse into Nostr messages.
let nostr_msg = match ws_next {
Some(Ok(Message::Text(m))) => {
convert_to_msg(m,settings.limits.max_event_bytes)
convert_to_msg(&m,settings.limits.max_event_bytes)
},
Some(Ok(Message::Binary(_))) => {
ws_stream.send(
make_notice_message(Notice::message("binary messages are not accepted".into()))).await.ok();
Some(Ok(Message::Binary(_))) => {
ws_stream.send(
make_notice_message(&Notice::message("binary messages are not accepted".into()))).await.ok();
continue;
},
Some(Ok(Message::Ping(_) | Message::Pong(_))) => {
@@ -546,27 +693,32 @@ async fn nostr_server(
// send responses automatically.
continue;
},
Some(Err(WsError::Capacity(MessageTooLong{size, max_size}))) => {
ws_stream.send(
make_notice_message(Notice::message(format!("message too large ({} > {})",size, max_size)))).await.ok();
Some(Err(WsError::Capacity(MessageTooLong{size, max_size}))) => {
ws_stream.send(
make_notice_message(&Notice::message(format!("message too large ({size} > {max_size})")))).await.ok();
continue;
},
},
None |
Some(Ok(Message::Close(_)) |
Err(WsError::AlreadyClosed | WsError::ConnectionClosed |
WsError::Protocol(tungstenite::error::ProtocolError::ResetWithoutClosingHandshake)))
Some(Ok(Message::Close(_)) |
Err(WsError::AlreadyClosed | WsError::ConnectionClosed |
WsError::Protocol(tungstenite::error::ProtocolError::ResetWithoutClosingHandshake)))
=> {
debug!("websocket close from client (cid: {}, ip: {:?})",cid, conn.ip());
break;
},
metrics.disconnects.with_label_values(&["normal"]).inc();
break;
},
Some(Err(WsError::Io(e))) => {
// IO errors are considered fatal
warn!("IO error (cid: {}, ip: {:?}): {:?}", cid, conn.ip(), e);
metrics.disconnects.with_label_values(&["error"]).inc();
break;
}
x => {
// default condition on error is to close the client connection
info!("unknown error (cid: {}, ip: {:?}): {:?} (closing conn)", cid, conn.ip(), x);
metrics.disconnects.with_label_values(&["error"]).inc();
break;
}
};
@@ -578,93 +730,96 @@ async fn nostr_server(
// handle each type of message
let evid = ec.event_id().to_owned();
let parsed : Result<Event> = Result::<Event>::from(ec);
metrics.cmd_event.inc();
match parsed {
Ok(e) => {
let id_prefix:String = e.id.chars().take(8).collect();
debug!("successfully parsed/validated event: {:?} (cid: {})", id_prefix, cid);
debug!("successfully parsed/validated event: {:?} (cid: {}, kind: {})", id_prefix, cid, e.kind);
// check if the event is too far in the future.
if e.is_valid_timestamp(settings.options.reject_future_seconds) {
// Write this to the database.
let submit_event = SubmittedEvent { event: e.clone(), notice_tx: notice_tx.clone() };
let submit_event = SubmittedEvent { event: e.clone(), notice_tx: notice_tx.clone(), source_ip: conn.ip().to_string(), origin: client_info.origin.clone(), user_agent: client_info.user_agent.clone()};
event_tx.send(submit_event).await.ok();
client_published_event_count += 1;
} else {
info!("client: {} sent a far future-dated event", cid);
if let Some(fut_sec) = settings.options.reject_future_seconds {
let msg = format!("The event created_at field is out of the acceptable range (+{}sec) for this relay.",fut_sec);
let notice = Notice::invalid(e.id, &msg);
ws_stream.send(make_notice_message(notice)).await.ok();
}
}
} else {
info!("client: {} sent a far future-dated event", cid);
if let Some(fut_sec) = settings.options.reject_future_seconds {
let msg = format!("The event created_at field is out of the acceptable range (+{fut_sec}sec) for this relay.");
let notice = Notice::invalid(e.id, &msg);
ws_stream.send(make_notice_message(&notice)).await.ok();
}
}
},
Err(e) => {
info!("client sent an invalid event (cid: {})", cid);
ws_stream.send(make_notice_message(Notice::invalid(evid, &format!("{}", e)))).await.ok();
ws_stream.send(make_notice_message(&Notice::invalid(evid, &format!("{e}")))).await.ok();
}
}
},
Ok(NostrMessage::SubMsg(s)) => {
debug!("subscription requested (cid: {}, sub: {:?})", cid, s.id);
// subscription handling consists of:
// * check for rate limits
// * registering the subscription so future events can be matched
// * making a channel to cancel to request later
// * sending a request for a SQL query
// Do nothing if the sub already exists.
if !current_subs.contains(&s) {
current_subs.push(s.clone());
// Do nothing if the sub already exists.
if conn.has_subscription(&s) {
info!("client sent duplicate subscription, ignoring (cid: {}, sub: {:?})", cid, s.id);
} else {
metrics.cmd_req.inc();
if let Some(ref lim) = sub_lim_opt {
lim.until_ready_with_jitter(jitter).await;
}
let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
match conn.subscribe(s.clone()) {
Ok(()) => {
Ok(()) => {
// when we insert, if there was a previous query running with the same name, cancel it.
if let Some(previous_query) = running_queries.insert(s.id.to_owned(), abandon_query_tx) {
previous_query.send(()).ok();
if let Some(previous_query) = running_queries.insert(s.id.clone(), abandon_query_tx) {
previous_query.send(()).ok();
}
// start a database query
db::db_query(s, cid.to_owned(), pool.clone(), query_tx.clone(), abandon_query_rx).await;
},
Err(e) => {
info!("Subscription error: {}", e);
ws_stream.send(make_notice_message(Notice::message(format!("Subscription error: {}", e)))).await.ok();
}
if s.needs_historical_events() {
// start a database query. this spawns a blocking database query on a worker thread.
repo.query_subscription(s, cid.clone(), query_tx.clone(), abandon_query_rx).await.ok();
}
},
Err(e) => {
info!("Subscription error: {} (cid: {}, sub: {:?})", e, cid, s.id);
ws_stream.send(make_notice_message(&Notice::message(format!("Subscription error: {e}")))).await.ok();
}
}
} else {
info!("client send duplicate subscription, ignoring (cid: {}, sub: {:?})", cid, s.id);
}
}
},
Ok(NostrMessage::CloseMsg(cc)) => {
// closing a request simply removes the subscription.
let parsed : Result<Close> = Result::<Close>::from(cc);
if let Ok(c) = parsed {
// remove from the list of known subs
if let Some(pos) = current_subs.iter().position(|s| *s.id == c.id) {
current_subs.remove(pos);
}
// check if a query is currently
// running, and remove it if so.
let stop_tx = running_queries.remove(&c.id);
if let Some(tx) = stop_tx {
tx.send(()).ok();
}
// stop checking new events against
// the subscription
conn.unsubscribe(&c);
} else {
info!("invalid command ignored");
ws_stream.send(make_notice_message(Notice::message("could not parse command".into()))).await.ok();
if let Ok(c) = parsed {
metrics.cmd_close.inc();
// check if a query is currently
// running, and remove it if so.
let stop_tx = running_queries.remove(&c.id);
if let Some(tx) = stop_tx {
tx.send(()).ok();
}
// stop checking new events against
// the subscription
conn.unsubscribe(&c);
} else {
info!("invalid command ignored");
ws_stream.send(make_notice_message(&Notice::message("could not parse command".into()))).await.ok();
}
},
Err(Error::ConnError) => {
debug!("got connection close/error, disconnecting cid: {}, ip: {:?}",cid, conn.ip());
break;
}
Err(Error::EventMaxLengthError(s)) => {
info!("client sent event larger ({} bytes) than max size (cid: {})", s, cid);
ws_stream.send(make_notice_message(Notice::message("event exceeded max size".into()))).await.ok();
info!("client sent command larger ({} bytes) than max size (cid: {})", s, cid);
ws_stream.send(make_notice_message(&Notice::message("event exceeded max size".into()))).await.ok();
},
Err(Error::ProtoParseError) => {
info!("client sent event that could not be parsed (cid: {})", cid);
ws_stream.send(make_notice_message(Notice::message("could not parse command".into()))).await.ok();
info!("client sent command that could not be parsed (cid: {})", cid);
ws_stream.send(make_notice_message(&Notice::message("could not parse command".into()))).await.ok();
},
Err(e) => {
info!("got non-fatal error from client (cid: {}, error: {:?}", cid, e);
@@ -683,6 +838,22 @@ async fn nostr_server(
conn.ip(),
client_published_event_count,
client_received_event_count,
orig_start.elapsed()
orig_start.elapsed()
);
}
#[derive(Clone)]
pub struct NostrMetrics {
pub query_sub: Histogram, // response time of successful subscriptions
pub query_db: Histogram, // individual database query execution time
pub db_connections: IntGauge, // database connections in use
pub write_events: Histogram, // response time of event writes
pub sent_events: IntCounterVec, // count of events sent to clients
pub connections: IntCounter, // count of websocket connections
pub disconnects: IntCounterVec, // client disconnects
pub query_aborts: IntCounterVec, // count of queries aborted by server
pub cmd_req: IntCounter, // count of REQ commands received
pub cmd_event: IntCounter, // count of EVENT commands received
pub cmd_close: IntCounter, // count of CLOSE commands received
}


@@ -2,7 +2,8 @@
use crate::error::Result;
use crate::event::Event;
use serde::de::Unexpected;
use serde::{Deserialize, Deserializer, Serialize};
use serde::ser::SerializeMap;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_json::Value;
use std::collections::HashMap;
use std::collections::HashSet;
@@ -19,7 +20,7 @@ pub struct Subscription {
/// Corresponds to client-provided subscription request elements. Any
/// element can be present if it should be used in filtering, or
/// absent ([`None`]) if it should be ignored.
#[derive(Serialize, PartialEq, Eq, Debug, Clone)]
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct ReqFilter {
/// Event hashes
pub ids: Option<Vec<String>>,
@@ -34,7 +35,6 @@ pub struct ReqFilter {
/// Limit number of results
pub limit: Option<u64>,
/// Set of tags
#[serde(skip)]
pub tags: Option<HashMap<char, HashSet<String>>>,
/// Force no matches due to malformed data
// we can't represent it in the req filter, so we don't want to
@@ -43,6 +43,40 @@ pub struct ReqFilter {
pub force_no_match: bool,
}
impl Serialize for ReqFilter {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S:Serializer,
{
let mut map = serializer.serialize_map(None)?;
if let Some(ids) = &self.ids {
map.serialize_entry("ids", &ids)?;
}
if let Some(kinds) = &self.kinds {
map.serialize_entry("kinds", &kinds)?;
}
if let Some(until) = &self.until {
map.serialize_entry("until", until)?;
}
if let Some(since) = &self.since {
map.serialize_entry("since", since)?;
}
if let Some(limit) = &self.limit {
map.serialize_entry("limit", limit)?;
}
if let Some(authors) = &self.authors {
map.serialize_entry("authors", &authors)?;
}
// serialize tags
if let Some(tags) = &self.tags {
for (k,v) in tags {
let vals:Vec<&String> = v.iter().collect();
map.serialize_entry(&format!("#{k}"), &vals)?;
}
}
map.end()
}
}
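The custom serializer re-emits tag criteria under their #-prefixed keys, so filters round-trip cleanly; for example (values illustrative), a filter with since 10 and one 'e' tag set serializes roughly as:

{"since":10,"#e":["foo","bar"]}

(serialize_map makes no ordering guarantee, so key order may vary).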
impl<'de> Deserialize<'de> for ReqFilter {
fn deserialize<D>(deserializer: D) -> Result<ReqFilter, D::Error>
where
@@ -65,12 +99,21 @@ impl<'de> Deserialize<'de> for ReqFilter {
tags: None,
force_no_match: false,
};
let empty_string = "".into();
let mut ts = None;
// iterate through each key, and assign values that exist
for (key, val) in filter.into_iter() {
for (key, val) in filter {
// ids
if key == "ids" {
rf.ids = Deserialize::deserialize(val).ok();
let raw_ids: Option<Vec<String>> = Deserialize::deserialize(val).ok();
if let Some(a) = raw_ids.as_ref() {
if a.contains(&empty_string) {
return Err(serde::de::Error::invalid_type(
Unexpected::Other("prefix matches must not be empty strings"),
&"a json object"));
}
}
rf.ids = raw_ids;
} else if key == "kinds" {
rf.kinds = Deserialize::deserialize(val).ok();
} else if key == "since" {
@@ -80,7 +123,15 @@ impl<'de> Deserialize<'de> for ReqFilter {
} else if key == "limit" {
rf.limit = Deserialize::deserialize(val).ok();
} else if key == "authors" {
rf.authors = Deserialize::deserialize(val).ok();
let raw_authors: Option<Vec<String>> = Deserialize::deserialize(val).ok();
if let Some(a) = raw_authors.as_ref() {
if a.contains(&empty_string) {
return Err(serde::de::Error::invalid_type(
Unexpected::Other("prefix matches must not be empty strings"),
&"a json object"));
}
}
rf.authors = raw_authors;
} else if key.starts_with('#') && key.len() > 1 && val.is_array() {
if let Some(tag_search) = tag_search_char_from_filter(key) {
if ts.is_none() {
@@ -90,7 +141,7 @@ impl<'de> Deserialize<'de> for ReqFilter {
if let Some(m) = ts.as_mut() {
let tag_vals: Option<Vec<String>> = Deserialize::deserialize(val).ok();
if let Some(v) = tag_vals {
let hs = HashSet::from_iter(v.into_iter());
let hs = v.into_iter().collect::<HashSet<_>>();
m.insert(tag_search.to_owned(), hs);
}
};
@@ -171,6 +222,7 @@ impl<'de> Deserialize<'de> for Subscription {
// create indexes
filters.push(f);
}
filters.dedup();
Ok(Subscription {
id: sub_id.to_owned(),
filters,
@@ -180,13 +232,20 @@ impl<'de> Deserialize<'de> for Subscription {
impl Subscription {
/// Get a copy of the subscription identifier.
pub fn get_id(&self) -> String {
#[must_use] pub fn get_id(&self) -> String {
self.id.clone()
}
/// Determine if any filter is requesting historical (database)
/// queries. If every filter has limit:0, we do not need to query the DB.
#[must_use] pub fn needs_historical_events(&self) -> bool {
self.filters.iter().any(|f| f.limit != Some(0))
}
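A client that only wants live events can lean on this; for example (subscription id illustrative):

["REQ","live-only",{"kinds":[1],"limit":0}]

still matches broadcast events in real time, while needs_historical_events returns false and no database query is issued for it.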
/// Determine if this subscription matches a given [`Event`]. Any
/// individual filter match is sufficient.
pub fn interested_in_event(&self, event: &Event) -> bool {
for f in self.filters.iter() {
#[must_use] pub fn interested_in_event(&self, event: &Event) -> bool {
for f in &self.filters {
if f.interested_in_event(event) {
return true;
}
@@ -209,23 +268,20 @@ impl ReqFilter {
fn ids_match(&self, event: &Event) -> bool {
self.ids
.as_ref()
.map(|vs| prefix_match(vs, &event.id))
.unwrap_or(true)
.map_or(true, |vs| prefix_match(vs, &event.id))
}
fn authors_match(&self, event: &Event) -> bool {
self.authors
.as_ref()
.map(|vs| prefix_match(vs, &event.pubkey))
.unwrap_or(true)
.map_or(true, |vs| prefix_match(vs, &event.pubkey))
}
fn delegated_authors_match(&self, event: &Event) -> bool {
if let Some(delegated_pubkey) = &event.delegated_by {
self.authors
.as_ref()
.map(|vs| prefix_match(vs, delegated_pubkey))
.unwrap_or(true)
.map_or(true, |vs| prefix_match(vs, delegated_pubkey))
} else {
false
}
@@ -251,16 +307,15 @@ impl ReqFilter {
fn kind_match(&self, kind: u64) -> bool {
self.kinds
.as_ref()
.map(|ks| ks.contains(&kind))
.unwrap_or(true)
.map_or(true, |ks| ks.contains(&kind))
}
/// Determine if all populated fields in this filter match the provided event.
pub fn interested_in_event(&self, event: &Event) -> bool {
#[must_use] pub fn interested_in_event(&self, event: &Event) -> bool {
// self.id.as_ref().map(|v| v == &event.id).unwrap_or(true)
self.ids_match(event)
&& self.since.map(|t| event.created_at > t).unwrap_or(true)
&& self.until.map(|t| event.created_at < t).unwrap_or(true)
&& self.since.map_or(true, |t| event.created_at > t)
&& self.until.map_or(true, |t| event.created_at < t)
&& self.kind_match(event.kind)
&& (self.authors_match(event) || self.delegated_authors_match(event))
&& self.tag_match(event)
@@ -294,6 +349,24 @@ mod tests {
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}
#[test]
fn req_empty_authors_prefix() {
let raw_json = "[\"REQ\",\"some-id\",{\"authors\": [\"\"]}]";
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}
#[test]
fn req_empty_ids_prefix() {
let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\"]}]";
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}
#[test]
fn req_empty_ids_prefix_mixed() {
let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\",\"aaa\"]}]";
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}
#[test]
fn legacy_filter() {
// legacy field in filter
@@ -301,6 +374,23 @@ mod tests {
assert!(serde_json::from_str::<Subscription>(raw_json).is_ok());
}
#[test]
fn dupe_filter() -> Result<()> {
let raw_json = r#"["REQ","some-id",{"kinds": [1984]}, {"kinds": [1984]}]"#;
let s: Subscription = serde_json::from_str(raw_json)?;
assert_eq!(s.filters.len(), 1);
Ok(())
}
#[test]
fn dupe_filter_many() -> Result<()> {
// many identical filters collapse to one (Vec::dedup removes consecutive duplicates)
let raw_json = r#"["REQ","some-id",{"kinds":[1984]},{"kinds":[1984]},{"kinds":[1984]},{"kinds":[1984]}]"#;
let s: Subscription = serde_json::from_str(raw_json)?;
assert_eq!(s.filters.len(), 1);
Ok(())
}
#[test]
fn author_filter() -> Result<()> {
let raw_json = r#"["REQ","some-id",{"authors": ["test-author-id"]}]"#;
@@ -532,4 +622,22 @@ mod tests {
assert!(!s.interested_in_event(&e));
Ok(())
}
#[test]
fn serialize_filter() -> Result<()> {
let s: Subscription = serde_json::from_str(r##"["REQ","xyz",{"authors":["abc", "bcd"], "since": 10, "until": 20, "limit":100, "#e": ["foo", "bar"], "#d": ["test"]}]"##)?;
let f = s.filters.get(0);
let serialized = serde_json::to_string(&f)?;
let serialized_wrapped = format!(r##"["REQ", "xyz",{}]"##, serialized);
let parsed: Subscription = serde_json::from_str(&serialized_wrapped)?;
let parsed_filter = parsed.filters.get(0);
if let Some(pf) = parsed_filter {
assert_eq!(pf.since, Some(10));
assert_eq!(pf.until, Some(20));
assert_eq!(pf.limit, Some(100));
} else {
assert!(false, "filter could not be parsed");
}
Ok(())
}
}


@@ -1,8 +1,9 @@
//! Common utility functions
use bech32::FromBase32;
use std::time::SystemTime;
/// Seconds since 1970.
pub fn unix_time() -> u64 {
#[must_use] pub fn unix_time() -> u64 {
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.map(|x| x.as_secs())
@@ -10,12 +11,23 @@ pub fn unix_time() -> u64 {
}
/// Check if a string contains only hex characters.
pub fn is_hex(s: &str) -> bool {
#[must_use] pub fn is_hex(s: &str) -> bool {
s.chars().all(|x| char::is_ascii_hexdigit(&x))
}
/// Check if string is a nip19 string
pub fn is_nip19(s: &str) -> bool {
s.starts_with("npub") || s.starts_with("note")
}
pub fn nip19_to_hex(s: &str) -> Result<String, bech32::Error> {
let (_hrp, data, _checksum) = bech32::decode(s)?;
let data = Vec::<u8>::from_base32(&data)?;
Ok(hex::encode(data))
}
/// Check if a string contains only lower-case hex chars.
pub fn is_lower_hex(s: &str) -> bool {
#[must_use] pub fn is_lower_hex(s: &str) -> bool {
s.chars().all(|x| {
(char::is_ascii_lowercase(&x) || char::is_ascii_digit(&x)) && char::is_ascii_hexdigit(&x)
})
@@ -30,4 +42,21 @@ mod tests {
let hexstr = "abcd0123";
assert_eq!(is_lower_hex(hexstr), true);
}
#[test]
fn nip19() {
let hexkey = "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d";
let nip19key = "npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6";
assert_eq!(is_nip19(hexkey), false);
assert_eq!(is_nip19(nip19key), true);
}
#[test]
fn nip19_hex() {
let nip19key = "npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6";
let expected = "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d";
let got = nip19_to_hex(nip19key).unwrap();
assert_eq!(expected, got);
}
}

10 tests/cli.rs Normal file

@@ -0,0 +1,10 @@
#[cfg(test)]
mod tests {
use nostr_rs_relay::cli::CLIArgs;
#[test]
fn cli_tests() {
use clap::CommandFactory;
CLIArgs::command().debug_assert();
}
}


@@ -36,9 +36,9 @@ pub fn start_relay() -> Result<Relay> {
settings.database.min_conn = 4;
settings.database.max_conn = 8;
let (shutdown_tx, shutdown_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
let handle = thread::spawn(|| {
let handle = thread::spawn(move || {
// server will block the thread it is run on.
let _ = start_server(settings, shutdown_rx);
let _ = start_server(&settings, shutdown_rx);
});
// how do we know the relay has finished starting up?
Ok(Relay {