Compare commits

...

64 Commits

Author SHA1 Message Date
Greg Heartsfield
cf3e67500f build: bump version to 0.7.17 2023-01-15 15:48:39 -06:00
Greg Heartsfield
1d19442cfd improvement: upgrade multiple dependencies
Updating crates.io index
Updating async-trait v0.1.60 -> v0.1.61
Updating axum v0.6.1 -> v0.6.2
Updating axum-core v0.3.0 -> v0.3.1
Updating clap v4.0.32 -> v4.1.1
Updating clap_derive v4.0.21 -> v4.1.0
Updating clap_lex v0.3.0 -> v0.3.1
Updating cxx v1.0.85 -> v1.0.86
Updating cxx-build v1.0.85 -> v1.0.86
Updating cxxbridge-flags v1.0.85 -> v1.0.86
Updating cxxbridge-macro v1.0.85 -> v1.0.86
Updating io-lifetimes v1.0.3 -> v1.0.4
Updating nom v7.1.2 -> v7.1.3
Updating parking_lot_core v0.9.5 -> v0.9.6
Updating pest v2.5.2 -> v2.5.3
Updating pest_derive v2.5.2 -> v2.5.3
Updating pest_generator v2.5.2 -> v2.5.3
Updating pest_meta v2.5.2 -> v2.5.3
Updating prost v0.11.5 -> v0.11.6
Updating prost-derive v0.11.5 -> v0.11.6
Updating prost-types v0.11.5 -> v0.11.6
Updating regex v1.7.0 -> v1.7.1
Updating schannel v0.1.20 -> v0.1.21
Removing sha1 v0.10.5
Adding sha2 v0.10.6
Updating termcolor v1.1.3 -> v1.2.0
Updating tokio v1.23.1 -> v1.24.1
Updating try-lock v0.2.3 -> v0.2.4
Removing windows-sys v0.36.1
Updating windows_aarch64_gnullvm v0.42.0 -> v0.42.1
Removing windows_aarch64_msvc v0.36.1
Removing windows_aarch64_msvc v0.42.0
Adding windows_aarch64_msvc v0.42.1
Removing windows_i686_gnu v0.36.1
Removing windows_i686_gnu v0.42.0
Adding windows_i686_gnu v0.42.1
Removing windows_i686_msvc v0.36.1
Removing windows_i686_msvc v0.42.0
Adding windows_i686_msvc v0.42.1
Removing windows_x86_64_gnu v0.36.1
Removing windows_x86_64_gnu v0.42.0
Adding windows_x86_64_gnu v0.42.1
Updating windows_x86_64_gnullvm v0.42.0 -> v0.42.1
Removing windows_x86_64_msvc v0.36.1
Removing windows_x86_64_msvc v0.42.0
Adding windows_x86_64_msvc v0.42.1
2023-01-15 15:46:33 -06:00
Greg Heartsfield
13cc24b5cd improvement: log blacklisted events 2023-01-15 15:42:27 -06:00
Greg Heartsfield
f543957b34 improvement: clear out hidden events during schema upgrade 2023-01-15 15:27:41 -06:00
Greg Heartsfield
7021f102e8 improvement: delete replaceable events 2023-01-15 15:13:10 -06:00
Greg Heartsfield
fddbf321bc perf: add indexes and force their use (authors) 2023-01-15 10:52:49 -06:00
Greg Heartsfield
3e7f2e21df perf: force authors index to be used if possible 2023-01-15 10:23:46 -06:00
Greg Heartsfield
9d9c6c78d1 improvement: refuse to insert events that would automatically be hidden 2023-01-15 10:01:01 -06:00
Greg Heartsfield
703b2efe6e refactor: replaceable check in event 2023-01-15 09:18:53 -06:00
Greg Heartsfield
0db6487ce3 fix: allow tokio tracing to be enabled
fixes https://github.com/scsibug/nostr-rs-relay/issues/48
2023-01-14 09:47:23 -06:00
Rasmus Schlunsen
ba987d3212 docs: update example nginx configuration to ensure A+ rating
config from https://www.ssllabs.com/ssltest/
2023-01-14 09:33:40 -06:00
Rasmus Schlunsen
73f4f60cc7 improvement: use clap for command line args 2023-01-14 09:22:11 -06:00
Greg Heartsfield
d06d227ebe improvement: lower REQ logging and note possible truncation 2023-01-11 16:56:40 -06:00
Greg Heartsfield
3519488c4e improvement: lower logging for failed REQ parses 2023-01-10 07:41:49 -06:00
Greg Heartsfield
fbd3315110 improvement: log REQ messages at debug level 2023-01-09 22:12:20 -06:00
Greg Heartsfield
3d3d1bde53 refactor: clippy suggestions 2023-01-09 22:12:04 -06:00
Greg Heartsfield
ed336111bb improvement: alert before long-running migration 2023-01-09 22:11:25 -06:00
Greg Heartsfield
8aed572989 docs: add link to relay setup 2023-01-09 21:33:59 -06:00
Greg Heartsfield
62e8da689d fix: do not force kind_created_at_index when there are tags 2023-01-06 12:57:48 -06:00
Greg Heartsfield
807d1aa384 improvement: log index names used 2023-01-06 12:50:52 -06:00
Greg Heartsfield
66a55b55b9 perf: new index, manually selected when appropriate 2023-01-06 12:17:30 -06:00
Greg Heartsfield
76c77c3e56 feat: bulk loading script for importing events 2023-01-06 12:16:19 -06:00
Greg Heartsfield
50daab8a6f refactor: make a standalone re-tagging function 2023-01-06 06:57:56 -06:00
Greg Heartsfield
ffd4e6f997 build: bump version to 0.7.16 2023-01-04 17:28:05 -06:00
Greg Heartsfield
bbd716963e improvement: update multiple dependencies
Updating anyhow v1.0.67 -> v1.0.68
Updating cxx v1.0.84 -> v1.0.85
Updating cxx-build v1.0.84 -> v1.0.85
Updating cxxbridge-flags v1.0.84 -> v1.0.85
Updating cxxbridge-macro v1.0.84 -> v1.0.85
Updating hermit-abi v0.1.19 -> v0.2.6
Updating libc v0.2.138 -> v0.2.139
Updating nom v7.1.1 -> v7.1.2
Updating num_cpus v1.14.0 -> v1.15.0
Updating once_cell v1.16.0 -> v1.17.0
Updating openssl v0.10.44 -> v0.10.45
Updating openssl-sys v0.9.79 -> v0.9.80
Updating pest v2.5.1 -> v2.5.2
Updating pest_derive v2.5.1 -> v2.5.2
Updating pest_generator v2.5.1 -> v2.5.2
Updating pest_meta v2.5.1 -> v2.5.2
Updating proc-macro2 v1.0.48 -> v1.0.49
Updating prost v0.11.3 -> v0.11.5
Updating prost-derive v0.11.2 -> v0.11.5
Updating prost-types v0.11.2 -> v0.11.5
Updating quote v1.0.22 -> v1.0.23
Updating serde v1.0.151 -> v1.0.152
Updating serde_derive v1.0.151 -> v1.0.152
Updating serde_json v1.0.90 -> v1.0.91
Updating syn v1.0.106 -> v1.0.107
Updating tokio v1.23.0 -> v1.23.1
2023-01-04 17:26:22 -06:00
Greg Heartsfield
ca95e8cf22 docs(NIP-26): reflect NIP-26 being disabled in README 2023-01-04 16:54:52 -06:00
Greg Heartsfield
e9d2a2cbd0 perf(NIP-26): temporarily disable NIP-26 delegated events 2023-01-04 16:51:22 -06:00
Greg Heartsfield
39a945b493 perf: separate author/delegated_by queries, minor improvement 2023-01-04 16:51:17 -06:00
Greg Heartsfield
9a84dc19e9 perf: author/kind index added (schema v13) 2023-01-04 16:51:02 -06:00
Greg Heartsfield
20c4bb42eb fix: correct log message 2023-01-03 21:24:46 -06:00
JesterHodl
0e519f6b77 feat: add --help and --version flags
fixes: https://github.com/scsibug/nostr-rs-relay/issues/42
2023-01-03 17:39:21 -06:00
Greg Heartsfield
3dd0f2c9c6 fix: do not run auto_vacuum on read-only connections 2023-01-03 17:32:55 -06:00
Greg Heartsfield
b7c8737166 improvement: enable auto_vacuum on database creation 2023-01-03 06:22:43 -06:00
Greg Heartsfield
c0b112c094 improvement: enable auto_vacuum on connections 2023-01-03 06:22:04 -06:00
Greg Heartsfield
cb283ac316 fix: ensure that replaceable events are handled correctly regardless of order received 2023-01-02 17:18:11 -06:00
Greg Heartsfield
2c6ac69bfd docs: remove incorrect comment 2023-01-02 15:41:17 -06:00
Greg Heartsfield
d929ae2752 improvement: define websocket send queue (unlimited->1024) 2023-01-02 15:39:28 -06:00
Greg Heartsfield
14fe9f9ee1 improvement: remove pauses for backups, likely not needed w/ WAL compaction 2023-01-02 15:38:30 -06:00
0xtr
7774db8c47 feat: add event kind blacklist
Adds a list to the config where you can specify which event kinds to blacklist.
The blacklist check will run right after verifying that the pubkey is allowed
to post events to the relay.
2022-12-27 17:10:34 -06:00
Greg Heartsfield
104ef2b9e1 build: bump version to 0.7.15 2022-12-27 17:04:48 -06:00
Greg Heartsfield
c06139ec99 docs: start of database maintenance tips 2022-12-27 17:00:14 -06:00
Greg Heartsfield
19ec89593d improvement: drop queries that are running during a checkpoint 2022-12-27 15:24:10 -06:00
Greg Heartsfield
27902bc5f4 improvement: move reader mutex closer to DB connection acquisition 2022-12-27 10:28:56 -06:00
Greg Heartsfield
d2adddaee4 improvement: extend allowed wal_checkpoint timeout to 10 sec 2022-12-27 10:13:14 -06:00
Greg Heartsfield
b23b3ce8ec improvement: block new readers when WAL is large 2022-12-27 09:48:07 -06:00
Greg Heartsfield
5f9fe1ce59 improvement: do not send realtime only filters to the DB (limit:0) 2022-12-26 12:20:36 -06:00
Greg Heartsfield
6a8c4ed1b5 build: bump version to 0.7.14 2022-12-26 11:26:48 -06:00
Greg Heartsfield
966c853700 docs: non-docker quick start 2022-12-26 10:34:09 -06:00
Greg Heartsfield
65fd0ed08b feat: increase wal_checkpoint time when WAL is large 2022-12-26 10:03:51 -06:00
Greg Heartsfield
0b51675b38 improvement: change suggestion and default for max sqlite DB readers 2022-12-25 11:17:08 -06:00
Greg Heartsfield
2e22334631 refactor: formatting 2022-12-25 11:06:30 -06:00
Greg Heartsfield
cb2ac4bf0f improvement: give threads unique names 2022-12-25 10:47:32 -06:00
Greg Heartsfield
38dc7789dc improvement: cleaner slow query logs 2022-12-25 10:47:32 -06:00
Greg Heartsfield
ce0e00ffb3 feat: log reader DB pool stats every minute 2022-12-25 10:47:32 -06:00
Greg Heartsfield
3e4ae4aeec feat: cache prepared statements and trace expanded SQL queries 2022-12-25 10:47:32 -06:00
Greg Heartsfield
c6a8807485 improvement: send error on empty-string prefix author/id searches 2022-12-25 10:47:32 -06:00
Greg Heartsfield
8137b6211c refactor: clippy suggestions 2022-12-24 10:29:47 -06:00
Greg Heartsfield
29effaae23 build: remove pre-commit rustfmt check 2022-12-24 10:29:30 -06:00
Greg Heartsfield
e5074f2e46 feat(NIP-28): replaceable kind 41 channel metadata events 2022-12-24 10:14:43 -06:00
Blake Jakopovic
4fd7643907 feat: change pub(crate) to pub for use as a library 2022-12-23 07:14:58 -06:00
Greg Heartsfield
1e1ec69175 build: remove unnecessary dockerfile mod script 2022-12-23 06:52:09 -06:00
benthecarman
e08647867c refactor: remove code duplication for simple_event 2022-12-23 06:39:50 -06:00
Greg Heartsfield
ae0f7171ed build: remove digest-locked docker base images 2022-12-23 06:30:59 -06:00
Greg Heartsfield
4f1a912f36 feat: log origin header from websocket requests
fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/33
2022-12-22 16:55:53 -06:00
22 changed files with 1216 additions and 356 deletions

View File

@@ -11,6 +11,6 @@ repos:
- repo: https://github.com/doublify/pre-commit-rust
rev: v1.0
hooks:
- id: fmt
# - id: fmt
- id: cargo-check
- id: clippy

Cargo.lock (generated, 385 lines changed)
View File

@@ -54,9 +54,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.67"
version = "1.0.68"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7724808837b77f4b4de9d283820f9d98bcf496d5692934b857a2399d31ff22e6"
checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61"
[[package]]
name = "async-stream"
@@ -81,9 +81,9 @@ dependencies = [
[[package]]
name = "async-trait"
version = "0.1.60"
version = "0.1.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "677d1d8ab452a3936018a687b20e6f7cf5363d713b732b8884001317b0e48aa3"
checksum = "705339e0e4a9690e2908d2b3d049d85682cf19fbd5782494498fbf7003a6a282"
dependencies = [
"proc-macro2",
"quote",
@@ -107,9 +107,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "axum"
version = "0.6.1"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08b108ad2665fa3f6e6a517c3d80ec3e77d224c47d605167aefaa5d7ef97fa48"
checksum = "1304eab461cf02bd70b083ed8273388f9724c549b316ba3d1e213ce0e9e7fb7e"
dependencies = [
"async-trait",
"axum-core",
@@ -136,9 +136,9 @@ dependencies = [
[[package]]
name = "axum-core"
version = "0.3.0"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "79b8558f5a0581152dc94dcd289132a1d377494bdeafcd41869b3258e3e2ad92"
checksum = "f487e40dc9daee24d8a1779df88522f159a54a980f99cfbe43db0be0bd3444a8"
dependencies = [
"async-trait",
"bytes",
@@ -223,6 +223,43 @@ dependencies = [
"winapi",
]
[[package]]
name = "clap"
version = "4.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ec7a4128863c188deefe750ac1d1dfe66c236909f845af04beed823638dc1b2"
dependencies = [
"bitflags",
"clap_derive",
"clap_lex",
"is-terminal",
"once_cell",
"strsim",
"termcolor",
]
[[package]]
name = "clap_derive"
version = "4.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "684a277d672e91966334af371f1a7b5833f9aa00b07c84e92fbce95e00208ce8"
dependencies = [
"heck",
"proc-macro-error",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "clap_lex"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "783fe232adfca04f90f56201b26d79682d4cd2625e0bc7290b95123afe558ade"
dependencies = [
"os_str_bytes",
]
[[package]]
name = "cloudabi"
version = "0.0.3"
@@ -382,9 +419,9 @@ dependencies = [
[[package]]
name = "cxx"
version = "1.0.84"
version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "27874566aca772cb515af4c6e997b5fe2119820bca447689145e39bb734d19a0"
checksum = "51d1075c37807dcf850c379432f0df05ba52cc30f279c5cfc43cc221ce7f8579"
dependencies = [
"cc",
"cxxbridge-flags",
@@ -394,9 +431,9 @@ dependencies = [
[[package]]
name = "cxx-build"
version = "1.0.84"
version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7bb951f2523a49533003656a72121306b225ec16a49a09dc6b0ba0d6f3ec3c0"
checksum = "5044281f61b27bc598f2f6647d480aed48d2bf52d6eb0b627d84c0361b17aa70"
dependencies = [
"cc",
"codespan-reporting",
@@ -409,15 +446,15 @@ dependencies = [
[[package]]
name = "cxxbridge-flags"
version = "1.0.84"
version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be778b6327031c1c7b61dd2e48124eee5361e6aa76b8de93692f011b08870ab4"
checksum = "61b50bc93ba22c27b0d31128d2d130a0a6b3d267ae27ef7e4fae2167dfe8781c"
[[package]]
name = "cxxbridge-macro"
version = "1.0.84"
version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b8a2b87662fe5a0a0b38507756ab66aff32638876a0866e5a5fc82ceb07ee49"
checksum = "39e61fda7e62115119469c7b3591fd913ecca96fb766cfd3f2e2502ab7bc87a5"
dependencies = [
"proc-macro2",
"quote",
@@ -462,6 +499,27 @@ version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
[[package]]
name = "errno"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1"
dependencies = [
"errno-dragonfly",
"libc",
"winapi",
]
[[package]]
name = "errno-dragonfly"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
dependencies = [
"cc",
"libc",
]
[[package]]
name = "fallible-iterator"
version = "0.2.0"
@@ -728,10 +786,16 @@ dependencies = [
]
[[package]]
name = "hermit-abi"
version = "0.1.19"
name = "heck"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9"
[[package]]
name = "hermit-abi"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
dependencies = [
"libc",
]
@@ -890,6 +954,28 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "io-lifetimes"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7d6c6f8c91b4b9ed43484ad1a938e393caf35960fce7f82a040497207bd8e9e"
dependencies = [
"libc",
"windows-sys",
]
[[package]]
name = "is-terminal"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28dfb6c8100ccc63462345b67d1bbc3679177c75ee4bf59bf29c8b1d110b8189"
dependencies = [
"hermit-abi",
"io-lifetimes",
"rustix",
"windows-sys",
]
[[package]]
name = "itertools"
version = "0.10.5"
@@ -933,9 +1019,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.138"
version = "0.2.139"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db6d7e329c562c5dfab7a46a2afabc8b987ab9a4834c9d1ca04dc54c1546cef8"
checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79"
[[package]]
name = "libsqlite3-sys"
@@ -963,6 +1049,12 @@ version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f"
[[package]]
name = "linux-raw-sys"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4"
[[package]]
name = "lock_api"
version = "0.4.9"
@@ -1051,7 +1143,7 @@ dependencies = [
"libc",
"log",
"wasi 0.11.0+wasi-snapshot-preview1",
"windows-sys 0.42.0",
"windows-sys",
]
[[package]]
@@ -1080,9 +1172,9 @@ checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c"
[[package]]
name = "nom"
version = "7.1.1"
version = "7.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36"
checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"
dependencies = [
"memchr",
"minimal-lexical",
@@ -1096,10 +1188,11 @@ checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21"
[[package]]
name = "nostr-rs-relay"
version = "0.7.13"
version = "0.7.17"
dependencies = [
"anyhow",
"bitcoin_hashes",
"clap",
"config",
"console-subscriber",
"const_format",
@@ -1209,9 +1302,9 @@ dependencies = [
[[package]]
name = "num_cpus"
version = "1.14.0"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5"
checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b"
dependencies = [
"hermit-abi",
"libc",
@@ -1219,15 +1312,15 @@ dependencies = [
[[package]]
name = "once_cell"
version = "1.16.0"
version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860"
checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66"
[[package]]
name = "openssl"
version = "0.10.44"
version = "0.10.45"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29d971fd5722fec23977260f6e81aa67d2f22cadbdc2aa049f1022d9a3be1566"
checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1"
dependencies = [
"bitflags",
"cfg-if",
@@ -1257,9 +1350,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
[[package]]
name = "openssl-sys"
version = "0.9.79"
version = "0.9.80"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5454462c0eced1e97f2ec09036abc8da362e66802f66fd20f86854d9d8cbcbc4"
checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7"
dependencies = [
"autocfg 1.1.0",
"cc",
@@ -1278,6 +1371,12 @@ dependencies = [
"hashbrown 0.9.1",
]
[[package]]
name = "os_str_bytes"
version = "6.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee"
[[package]]
name = "parking_lot"
version = "0.12.1"
@@ -1290,15 +1389,15 @@ dependencies = [
[[package]]
name = "parking_lot_core"
version = "0.9.5"
version = "0.9.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba"
checksum = "ba1ef8814b5c993410bb3adfad7a5ed269563e4a2f90c41f5d85be7fb47133bf"
dependencies = [
"cfg-if",
"libc",
"redox_syscall",
"smallvec",
"windows-sys 0.42.0",
"windows-sys",
]
[[package]]
@@ -1326,9 +1425,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
[[package]]
name = "pest"
version = "2.5.1"
version = "2.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cc8bed3549e0f9b0a2a78bf7c0018237a2cdf085eecbbc048e52612438e4e9d0"
checksum = "4257b4a04d91f7e9e6290be5d3da4804dd5784fafde3a497d73eb2b4a158c30a"
dependencies = [
"thiserror",
"ucd-trie",
@@ -1336,9 +1435,9 @@ dependencies = [
[[package]]
name = "pest_derive"
version = "2.5.1"
version = "2.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdc078600d06ff90d4ed238f0119d84ab5d43dbaad278b0e33a8820293b32344"
checksum = "241cda393b0cdd65e62e07e12454f1f25d57017dcc514b1514cd3c4645e3a0a6"
dependencies = [
"pest",
"pest_generator",
@@ -1346,9 +1445,9 @@ dependencies = [
[[package]]
name = "pest_generator"
version = "2.5.1"
version = "2.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28a1af60b1c4148bb269006a750cff8e2ea36aff34d2d96cf7be0b14d1bed23c"
checksum = "46b53634d8c8196302953c74d5352f33d0c512a9499bd2ce468fc9f4128fa27c"
dependencies = [
"pest",
"pest_meta",
@@ -1359,13 +1458,13 @@ dependencies = [
[[package]]
name = "pest_meta"
version = "2.5.1"
version = "2.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fec8605d59fc2ae0c6c1aefc0c7c7a9769732017c0ce07f7a9cfffa7b4404f20"
checksum = "0ef4f1332a8d4678b41966bb4cc1d0676880e84183a1ecc3f4b69f03e99c7a51"
dependencies = [
"once_cell",
"pest",
"sha1",
"sha2",
]
[[package]]
@@ -1413,19 +1512,43 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
[[package]]
name = "proc-macro2"
version = "1.0.48"
name = "proc-macro-error"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e9d89e5dba24725ae5678020bf8f1357a9aa7ff10736b551adbcd3f8d17d766f"
checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
dependencies = [
"proc-macro-error-attr",
"proc-macro2",
"quote",
"syn",
"version_check",
]
[[package]]
name = "proc-macro-error-attr"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
dependencies = [
"proc-macro2",
"quote",
"version_check",
]
[[package]]
name = "proc-macro2"
version = "1.0.49"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5"
dependencies = [
"unicode-ident",
]
[[package]]
name = "prost"
version = "0.11.3"
version = "0.11.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0b18e655c21ff5ac2084a5ad0611e827b3f92badf79f4910b5a5c58f4d87ff0"
checksum = "21dc42e00223fc37204bd4aa177e69420c604ca4a183209a8f9de30c6d934698"
dependencies = [
"bytes",
"prost-derive",
@@ -1433,9 +1556,9 @@ dependencies = [
[[package]]
name = "prost-derive"
version = "0.11.2"
version = "0.11.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "164ae68b6587001ca506d3bf7f1000bfa248d0e1217b618108fba4ec1d0cc306"
checksum = "8bda8c0881ea9f722eb9629376db3d0b903b462477c1aafcb0566610ac28ac5d"
dependencies = [
"anyhow",
"itertools",
@@ -1446,9 +1569,9 @@ dependencies = [
[[package]]
name = "prost-types"
version = "0.11.2"
version = "0.11.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "747761bc3dc48f9a34553bf65605cf6cb6288ba219f3450b4275dbd81539551a"
checksum = "a5e0526209433e96d83d750dd81a99118edbc55739e7e61a46764fd2ad537788"
dependencies = [
"bytes",
"prost",
@@ -1472,9 +1595,9 @@ dependencies = [
[[package]]
name = "quote"
version = "1.0.22"
version = "1.0.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "556d0f47a940e895261e77dc200d5eadfc6ef644c179c6f5edfc105e3a2292c8"
checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b"
dependencies = [
"proc-macro2",
]
@@ -1665,9 +1788,9 @@ dependencies = [
[[package]]
name = "regex"
version = "1.7.0"
version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733"
dependencies = [
"aho-corasick",
"memchr",
@@ -1734,6 +1857,20 @@ dependencies = [
"ordered-multimap",
]
[[package]]
name = "rustix"
version = "0.36.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4feacf7db682c6c329c4ede12649cd36ecab0f3be5b7d74e6a20304725db4549"
dependencies = [
"bitflags",
"errno",
"io-lifetimes",
"libc",
"linux-raw-sys",
"windows-sys",
]
[[package]]
name = "rustversion"
version = "1.0.11"
@@ -1748,12 +1885,11 @@ checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde"
[[package]]
name = "schannel"
version = "0.1.20"
version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2"
checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3"
dependencies = [
"lazy_static",
"windows-sys 0.36.1",
"windows-sys",
]
[[package]]
@@ -1823,18 +1959,18 @@ dependencies = [
[[package]]
name = "serde"
version = "1.0.151"
version = "1.0.152"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97fed41fc1a24994d044e6db6935e69511a1153b52c15eb42493b26fa87feba0"
checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.151"
version = "1.0.152"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "255abe9a125a985c05190d687b320c12f9b1f0b99445e608c21ba0782c719ad8"
checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e"
dependencies = [
"proc-macro2",
"quote",
@@ -1843,9 +1979,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.90"
version = "1.0.91"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8778cc0b528968fe72abec38b5db5a20a70d148116cd9325d2bc5f5180ca3faf"
checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883"
dependencies = [
"indexmap",
"itoa",
@@ -1865,10 +2001,10 @@ dependencies = [
]
[[package]]
name = "sha1"
version = "0.10.5"
name = "sha2"
version = "0.10.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3"
checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0"
dependencies = [
"cfg-if",
"cpufeatures",
@@ -1919,10 +2055,16 @@ dependencies = [
]
[[package]]
name = "syn"
version = "1.0.106"
name = "strsim"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ee3a69cd2c7e06684677e5629b3878b253af05e4714964204279c6bc02cf0b"
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
[[package]]
name = "syn"
version = "1.0.107"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5"
dependencies = [
"proc-macro2",
"quote",
@@ -1951,9 +2093,9 @@ dependencies = [
[[package]]
name = "termcolor"
version = "1.1.3"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755"
checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6"
dependencies = [
"winapi-util",
]
@@ -2004,9 +2146,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c"
[[package]]
name = "tokio"
version = "1.23.0"
version = "1.24.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eab6d665857cc6ca78d6e80303a02cea7a7851e85dfbd77cbdc09bd129f1ef46"
checksum = "1d9f76183f91ecfb55e1d7d5602bd1d979e38a3a522fe900241cf195624d67ae"
dependencies = [
"autocfg 1.1.0",
"bytes",
@@ -2020,7 +2162,7 @@ dependencies = [
"socket2",
"tokio-macros",
"tracing",
"windows-sys 0.42.0",
"windows-sys",
]
[[package]]
@@ -2287,9 +2429,9 @@ dependencies = [
[[package]]
name = "try-lock"
version = "0.2.3"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642"
checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed"
[[package]]
name = "tungstenite"
@@ -2516,19 +2658,6 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "windows-sys"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
dependencies = [
"windows_aarch64_msvc 0.36.1",
"windows_i686_gnu 0.36.1",
"windows_i686_msvc 0.36.1",
"windows_x86_64_gnu 0.36.1",
"windows_x86_64_msvc 0.36.1",
]
[[package]]
name = "windows-sys"
version = "0.42.0"
@@ -2536,85 +2665,55 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc 0.42.0",
"windows_i686_gnu 0.42.0",
"windows_i686_msvc 0.42.0",
"windows_x86_64_gnu 0.42.0",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc 0.42.0",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.0"
version = "0.42.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e"
checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608"
[[package]]
name = "windows_aarch64_msvc"
version = "0.36.1"
version = "0.42.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
[[package]]
name = "windows_aarch64_msvc"
version = "0.42.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4"
checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7"
[[package]]
name = "windows_i686_gnu"
version = "0.36.1"
version = "0.42.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
[[package]]
name = "windows_i686_gnu"
version = "0.42.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7"
checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640"
[[package]]
name = "windows_i686_msvc"
version = "0.36.1"
version = "0.42.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
[[package]]
name = "windows_i686_msvc"
version = "0.42.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246"
checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605"
[[package]]
name = "windows_x86_64_gnu"
version = "0.36.1"
version = "0.42.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
[[package]]
name = "windows_x86_64_gnu"
version = "0.42.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed"
checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.0"
version = "0.42.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028"
checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463"
[[package]]
name = "windows_x86_64_msvc"
version = "0.36.1"
version = "0.42.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"
[[package]]
name = "windows_x86_64_msvc"
version = "0.42.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5"
checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd"
[[package]]
name = "yaml-rust"

View File

@@ -1,6 +1,6 @@
[package]
name = "nostr-rs-relay"
version = "0.7.13"
version = "0.7.17"
edition = "2021"
authors = ["Greg Heartsfield <scsibug@imap.cc>"]
description = "A relay implementation for the Nostr protocol"
@@ -12,6 +12,7 @@ keywords = ["nostr", "server"]
categories = ["network-programming", "web-programming"]
[dependencies]
clap = { version = "4.0.32", features = ["env", "default", "derive"]}
tracing = "0.1.36"
tracing-subscriber = "0.2.0"
tokio = { version = "1", features = ["full", "tracing", "signal"] }
@@ -28,7 +29,7 @@ secp256k1 = {version = "0.21", features = ["rand", "rand-std", "serde", "bitcoin
serde = { version = "1.0", features = ["derive"] }
serde_json = {version = "1.0", features = ["preserve_order"]}
hex = "0.4"
rusqlite = { version = "0.26", features = ["limits","bundled"]}
rusqlite = { version = "0.26", features = ["limits","bundled","modern_sqlite", "trace"]}
r2d2 = "0.8"
r2d2_sqlite = "0.19"
lazy_static = "1.4"

View File

@@ -1,4 +1,4 @@
FROM docker.io/library/rust:1.66.0@sha256:359949280cebefe93ccb33089fe25111a3aadfe99eac4b6cbe8ec3e1b571dacb as builder
FROM docker.io/library/rust:1.66.0 as builder
RUN USER=root cargo install cargo-auditable
RUN USER=root cargo new --bin nostr-rs-relay
@@ -17,7 +17,7 @@ COPY ./src ./src
RUN rm ./target/release/deps/nostr*relay*
RUN cargo auditable build --release --locked
FROM docker.io/library/debian:bullseye-20221205-slim@sha256:25f10b4f1ded5341a3ca0a30290ff3cd5639415f0c5a2222d5e7d5dd72952aa1
FROM docker.io/library/debian:bullseye-slim
ARG APP=/usr/src/app
ARG APP_DATA=/usr/src/app/db

View File

@@ -28,7 +28,8 @@ mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay).
- [x] NIP-16: [Event Treatment](https://github.com/nostr-protocol/nips/blob/master/16.md)
- [x] NIP-20: [Command Results](https://github.com/nostr-protocol/nips/blob/master/20.md)
- [x] NIP-22: [Event `created_at` limits](https://github.com/nostr-protocol/nips/blob/master/22.md) (_future-dated events only_)
- [x] NIP-26: [Event Delegation](https://github.com/nostr-protocol/nips/blob/master/26.md)
- [ ] NIP-26: [Event Delegation](https://github.com/nostr-protocol/nips/blob/master/26.md) (_implemented, but currently disabled_)
- [x] NIP-28: [Public Chat](https://github.com/nostr-protocol/nips/blob/master/28.md)
## Quick Start
@@ -81,6 +82,38 @@ Text Note [81cf...2652] from 296a...9b92 5 seconds ago
A pre-built container is also available on DockerHub:
https://hub.docker.com/r/scsibug/nostr-rs-relay
## Build and Run (without Docker)
Building `nostr-rs-relay` requires an installation of Cargo & Rust: https://www.rust-lang.org/tools/install
Clone this repository, and then build a release version of the relay:
```console
$ git clone -q https://git.sr.ht/\~gheartsfield/nostr-rs-relay
$ cd nostr-rs-relay
$ cargo build -q -r
```
The relay executable is now located in
`target/release/nostr-rs-relay`. In order to run it with logging
enabled, execute it with the `RUST_LOG` variable set:
```console
$ RUST_LOG=warn,nostr_rs_relay=info ./target/release/nostr-rs-relay
Dec 26 10:31:56.455 INFO nostr_rs_relay: Starting up from main
Dec 26 10:31:56.464 INFO nostr_rs_relay::server: listening on: 0.0.0.0:8080
Dec 26 10:31:56.466 INFO nostr_rs_relay::server: db writer created
Dec 26 10:31:56.466 INFO nostr_rs_relay::db: Built a connection pool "event writer" (min=1, max=2)
Dec 26 10:31:56.466 INFO nostr_rs_relay::db: opened database "./nostr.db" for writing
Dec 26 10:31:56.466 INFO nostr_rs_relay::schema: DB version = 11
Dec 26 10:31:56.467 INFO nostr_rs_relay::db: Built a connection pool "maintenance writer" (min=1, max=2)
Dec 26 10:31:56.467 INFO nostr_rs_relay::server: control message listener started
Dec 26 10:31:56.468 INFO nostr_rs_relay::db: Built a connection pool "client query" (min=4, max=8)
```
You now have a running relay, on port `8080`. Use a `nostr` client or
`websocat` to connect and send/query for events.
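For example, with `websocat` you can open a connection and issue a
subscription request by hand (the subscription id and filter here are
arbitrary):
```console
$ websocat ws://localhost:8080
["REQ","sub1",{"kinds":[1],"limit":2}]
```
The relay replies with any matching `EVENT` messages, followed by an
`EOSE` message marking the end of stored events.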
## Configuration
The sample [`config.toml`](config.toml) file demonstrates the
@@ -115,3 +148,8 @@ To chat about `nostr-rs-relay` on `nostr` itself; visit our channel on [anigma](
License
---
This project is MIT licensed.
External Documentation and Links
---
* [BlockChainCaffe's Nostr Relay Setup Guide](https://github.com/BlockChainCaffe/Nostr-Relay-Setup-Guide)

View File

@@ -18,7 +18,7 @@ description = "A newly created nostr-rs-relay.\n\nCustomize this with your own i
[diagnostics]
# Enable tokio tracing (for use with tokio-console)
#tracing = true
#tracing = false
[database]
# Directory for SQLite files. Defaults to the current directory. Can
@@ -36,8 +36,9 @@ data_directory = "."
# Minimum number of SQLite reader connections
#min_conn = 4
# Maximum number of SQLite reader connections
#max_conn = 128
# Maximum number of SQLite reader connections. Recommend setting this
# to approx the number of cores.
#max_conn = 8
[network]
# Bind to this network address
@@ -99,6 +100,11 @@ reject_future_seconds = 1800
# backpressure to senders if writes are slow.
#event_persist_buffer = 4096
# Event kind blacklist. Events with these kinds will be discarded.
#event_kind_blacklist = [
# 70202,
#]
[authorization]
# Pubkey addresses in this array are whitelisted for event publishing.
# Only valid events by these authors will be accepted, if the variable

View File

@@ -0,0 +1,125 @@
# Database Maintenance
`nostr-rs-relay` uses the SQLite embedded database to minimize
dependencies and overall footprint of running a relay. If traffic is
light, the relay should just run with very little need for
intervention. For heavily trafficked relays, there are a number of
steps that the operator may need to take to maintain performance and
limit disk usage.
This maintenance guide is current as of version `0.7.14`. Future
versions may incorporate and automate some of these steps.
## Backing Up the Database
To prevent data loss, the database should be backed up regularly. The
recommended method is to use the `sqlite3` command to perform an
"Online Backup". This can be done while the relay is running, queries
can still run and events will be persisted during the backup.
The following commands will perform a backup of the database to a
dated file, and then compress it to minimize size:
```console
BACKUP_FILE=/var/backups/nostr/`date +%Y%m%d_%H%M`.db
sqlite3 -readonly /apps/nostr-relay/nostr.db ".backup $BACKUP_FILE"
sqlite3 $BACKUP_FILE "vacuum;"
bzip2 -9 $BACKUP_FILE
```
Nostr events are very compressible. Expect a compression ratio on the
order of 4:1, resulting in a 75% space saving.
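Restoring is simply the reverse; a sketch, with an illustrative backup
filename:
```console
bunzip2 -k /var/backups/nostr/20230101_0300.db.bz2
```
The decompressed file can then be placed (as `nostr.db`) in the
relay's configured data directory while the relay is stopped.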
## Vacuuming the Database
As the database is updated, it can become fragmented. Performing a
full `vacuum` rebuilds the entire database file, which may reduce its
size, especially if a large amount of data was recently updated or
deleted.
```console
vacuum;
```
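The same vacuum can be run non-interactively with the `sqlite3` CLI; a
sketch, reusing the database path from the backup example above:
```console
sqlite3 /apps/nostr-relay/nostr.db "VACUUM;"
```
Note that `VACUUM` rebuilds the database into a temporary copy, so it
needs transient free disk space roughly equal to the current database
size.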
## Clearing Hidden Events
When events are deleted, either through deletion events, metadata or
follower updates, or a replaceable event kind, the event is not
actually removed from the database. Instead, a flag `HIDDEN` is set
to true for the event, which excludes it from search results. The
original intent was to ensure that subsequent rebroadcasts of the
event would be easily detected as having been deleted, and would not
need to be stored again. In practice, this decision causes excessive
growth of the `tags` table, since all the previous followers are
retained for those `HIDDEN` events.
The `event` and especially the `tag` table can be significantly
reduced in size by running these commands:
```console
PRAGMA foreign_keys = ON;
delete from event where HIDDEN=true;
```
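To gauge the impact before deleting, the hidden rows can be counted
first; a sketch using the same `HIDDEN` flag and the `tag` table's
`event_id` foreign key:
```console
select count(*) from event where HIDDEN=true;
select count(*) from tag where event_id in (select id from event where HIDDEN=true);
```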
## Manually Removing Events
For a variety of reasons, an operator may wish to remove some events
from the database. The only way of achieving this today is with
manually run SQL commands.
It is recommended to have a good backup prior to manually running SQL
commands!
In all cases, it is mandatory to enable foreign keys, and this must be
done for every connection. Otherwise, you will likely orphan rows in
the `tag` table.
### Deleting a Specific Event
```console
PRAGMA foreign_keys = ON;
delete from event where event_hash=x'00000000000c1271675dc86e3e1dd1336827bccabb90dc4c9d3b4465efefe00e';
```
### Deleting All Events for a Pubkey
```console
PRAGMA foreign_keys = ON;
delete from event where author=x'000000000002c7831d9c5a99f183afc2813a6f69a16edda7f6fc0ed8110566e6';
```
### Deleting All Events of a Kind
```console
PRAGMA foreign_keys = ON;
delete from event where kind=70202;
```
### Deleting Old Events
In this scenario, we wish to delete any event that has been stored by
our relay for more than 1 month. Crucially, this is based on when the
event was stored, not when the event says it was created. If an event
has a `created_at` field of 2 years ago, but was first sent to our
relay yesterday, it would not be deleted in this scenario. Keep in
mind that we do not track anything for re-broadcast events that we
already have, so this is not a very effective way of implementing a
"least recently seen" policy.
```console
PRAGMA foreign_keys = ON;
TODO!
```
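The statement itself is left as a `TODO!` above. A hypothetical
sketch, assuming the `first_seen` column (populated with
`strftime('%s','now')` in the insert statements elsewhere in this
changeset) holds a unix timestamp:
```console
PRAGMA foreign_keys = ON;
-- hypothetical: assumes first_seen is a unix timestamp
delete from event where first_seen < cast(strftime('%s','now','-1 month') as integer);
```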
### Delete Profile Events with No Recent Events
Many users create profiles, post a "hello world" event, and then never
appear again (likely using an ephemeral keypair that was lost in the
browser cache). We can find these accounts and remove them after some
time.
```console
PRAGMA foreign_keys = ON;
TODO!
```
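This query is also left as a `TODO!`. One hypothetical approach is to
remove every event from authors whose most recent event is older than
some cutoff (30 days here, chosen arbitrarily):
```console
PRAGMA foreign_keys = ON;
-- hypothetical: authors with no events in the last 30 days
delete from event where author in (
  select author from event
  group by author
  having max(created_at) < cast(strftime('%s','now','-30 days') as integer)
);
```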

View File

@@ -1,3 +0,0 @@
#!/usr/bin/env bash
sed -E 's/@sha256:[[:alnum:]]+//g' Dockerfile > Dockerfile.any-platform
echo "Created platform-agnostic Dockerfile in 'Dockerfile.any-platform'"

View File

@@ -68,8 +68,25 @@ http {
server_name relay.example.com;
ssl_certificate /etc/letsencrypt/live/relay.example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/relay.example.com/privkey.pem;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_protocols TLSv1.3 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ecdh_curve secp521r1:secp384r1;
ssl_ciphers EECDH+AESGCM:EECDH+AES256;
# Optional Diffie-Hellman parameters
# Generate with openssl dhparam -out /etc/ssl/certs/dhparam.pem 4096
#ssl_dhparam /etc/ssl/certs/dhparam.pem;
ssl_session_cache shared:TLS:2m;
ssl_buffer_size 4k;
# OCSP stapling
ssl_stapling on;
ssl_stapling_verify on;
resolver 1.1.1.1 1.0.0.1 [2606:4700:4700::1111] [2606:4700:4700::1001]; # Cloudflare
# Set HSTS to 365 days
add_header Strict-Transport-Security 'max-age=31536000; includeSubDomains; preload' always;
keepalive_timeout 70;
location / {

src/bin/bulkloader.rs (new file, 176 lines)
View File

@@ -0,0 +1,176 @@
use std::io;
use std::path::Path;
use nostr_rs_relay::utils::is_lower_hex;
use tracing::*;
use nostr_rs_relay::config;
use nostr_rs_relay::event::{Event,single_char_tagname};
use nostr_rs_relay::error::{Error, Result};
use nostr_rs_relay::db::build_pool;
use nostr_rs_relay::schema::{curr_db_version, DB_VERSION};
use rusqlite::{OpenFlags, Transaction};
use nostr_rs_relay::db::PooledConnection;
use std::sync::mpsc;
use std::thread;
use rusqlite::params;
/// Bulk load JSONL data from STDIN to the database specified in config.toml (or ./nostr.db as a default).
/// The database must already exist, this will not create a new one.
/// Tested against schema v13.
pub fn main() -> Result<()> {
let _trace_sub = tracing_subscriber::fmt::try_init();
println!("Nostr-rs-relay Bulk Loader");
// check for a database file, or create one.
let settings = config::Settings::new();
if !Path::new(&settings.database.data_directory).is_dir() {
info!("Database directory does not exist");
return Err(Error::DatabaseDirError);
}
// Get a database pool
let pool = build_pool("bulk-loader", &settings, OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE, 1,4,false);
{
// check for database schema version
let mut conn: PooledConnection = pool.get()?;
let version = curr_db_version(&mut conn)?;
info!("current version is: {:?}", version);
// ensure the schema version is current.
if version != DB_VERSION {
info!("version is not current, exiting");
panic!("cannot write to schema other than v{}", DB_VERSION);
}
}
// this channel will contain parsed events ready to be inserted
let (event_tx, event_rx) = mpsc::sync_channel(100_000);
// Thread for reading events
let _stdin_reader_handler = thread::spawn(move || {
let stdin = io::stdin();
for readline in stdin.lines() {
if let Ok(line) = readline {
// try to parse a nostr event
let eres: Result<Event, serde_json::Error> = serde_json::from_str(&line);
if let Ok(mut e) = eres {
if let Ok(()) = e.validate() {
e.build_index();
//debug!("Event: {:?}", e);
event_tx.send(Some(e)).ok();
} else {
info!("could not validate event");
}
} else {
info!("error reading event: {:?}", eres);
}
} else {
// error reading
info!("error reading: {:?}", readline);
}
}
info!("finished parsing events");
event_tx.send(None).ok();
let ok: Result<()> = Ok(());
return ok;
});
let mut conn: PooledConnection = pool.get()?;
let mut events_read = 0;
let event_batch_size = 50_000;
let mut new_events = 0;
let mut has_more_events = true;
while has_more_events {
// begin a transaction
let tx = conn.transaction()?;
// read in batch_size events and commit
for _ in 0..event_batch_size {
match event_rx.recv() {
Ok(Some(e)) => {
events_read += 1;
// ignore ephemeral events
if !(e.kind >= 20000 && e.kind < 30000) {
match write_event(&tx, e) {
Ok(c) => {
new_events += c;
},
Err(e) => {
info!("error inserting event: {:?}", e);
}
}
}
},
Ok(None) => {
// signal that the sender will never produce more
// events
has_more_events=false;
break;
},
Err(_) => {
info!("sender is closed");
// sender is done
}
}
}
info!("committed {} events...", new_events);
tx.commit()?;
conn.execute_batch("pragma wal_checkpoint(truncate)")?;
}
info!("processed {} events", events_read);
info!("stored {} new events", new_events);
// get a connection for writing events
// read standard in.
info!("finished reading input");
Ok(())
}
/// Write an event and update the tag table.
/// Assumes the event has its index built.
fn write_event(tx: &Transaction, e: Event) -> Result<usize> {
let id_blob = hex::decode(&e.id).ok();
let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok();
let delegator_blob: Option<Vec<u8>> = e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
let event_str = serde_json::to_string(&e).ok();
// ignore if the event hash is a duplicate.
let ins_count = tx.execute(
"INSERT OR IGNORE INTO event (event_hash, created_at, kind, author, delegated_by, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, ?6, strftime('%s','now'), FALSE);",
params![id_blob, e.created_at, e.kind, pubkey_blob, delegator_blob, event_str]
)?;
if ins_count == 0 {
return Ok(0);
}
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
let event_id = tx.last_insert_rowid();
// look at each event, and each tag, creating new tag entries if appropriate.
for t in e.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.get(0).unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
}
// safe because len was > 1
let tagval = t.get(1).unwrap();
// insert as BLOB if we can restore it losslessly.
// this means it needs to be even length and lowercase.
if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
tx.execute(
"INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
params![event_id, tagname, hex::decode(tagval).ok()],
)?;
} else {
// otherwise, insert as text
tx.execute(
"INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
params![event_id, tagname, &tagval],
)?;
}
}
if e.is_replaceable() {
//let query = "SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1;";
//let count: usize = tx.query_row(query, params![e.kind, pubkey_blob], |row| row.get(0))?;
//info!("found {} rows that /would/ be preserved", count);
match tx.execute(
"DELETE FROM event WHERE kind=? and author=? and id NOT IN (SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1);",
params![e.kind, pubkey_blob, e.kind, pubkey_blob],
) {
Ok(_) => {},
Err(x) => {info!("error deleting replaceable event: {:?}",x);}
}
}
Ok(ins_count)
}
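Because this file lives at `src/bin/bulkloader.rs`, Cargo builds it as
a separate binary; a plausible invocation (the JSONL filename is
illustrative):
```console
$ cargo build -r --bin bulkloader
$ ./target/release/bulkloader < events.jsonl
```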

src/cli.rs (new file, 14 lines)
View File

@@ -0,0 +1,14 @@
use clap::Parser;
#[derive(Parser)]
#[command(about = "A nostr relay written in Rust", author = env!("CARGO_PKG_AUTHORS"), version = env!("CARGO_PKG_VERSION"))]
pub struct CLIArgs {
#[arg(
short,
long,
help = "Use the <directory> as the location of the database",
default_value = ".",
required = false
)]
pub db: String,
}
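With this derive, clap provides `--help` and `--version` (per the
commit earlier in this range) plus a `-d`/`--db <directory>` option
generated from the `db` field; for example:
```console
$ ./target/release/nostr-rs-relay --db /var/lib/nostr-relay
```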

View File

@@ -60,6 +60,7 @@ pub struct Limits {
pub max_ws_frame_bytes: Option<usize>,
pub broadcast_buffer: usize, // events to buffer for subscribers (prevents slow readers from consuming memory)
pub event_persist_buffer: usize, // events to buffer for database commits (block senders if database writes are too slow)
pub event_kind_blacklist: Option<Vec<u64>>
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -207,7 +208,7 @@ impl Default for Settings {
data_directory: ".".to_owned(),
in_memory: false,
min_conn: 4,
max_conn: 128,
max_conn: 8,
},
network: Network {
port: 8080,
@@ -225,6 +226,7 @@ impl Default for Settings {
max_ws_frame_bytes: Some(2 << 17), // 128K
broadcast_buffer: 16384,
event_persist_buffer: 4096,
event_kind_blacklist: None,
},
authorization: Authorization {
pubkey_whitelist: None, // Allow any address to publish

src/db.rs (322 lines changed)
View File

@@ -19,8 +19,10 @@ use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::params;
use rusqlite::types::ToSql;
use rusqlite::OpenFlags;
use tokio::sync::{Mutex, MutexGuard};
use std::fmt::Write as _;
use std::path::Path;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use std::time::Instant;
@@ -39,13 +41,8 @@ pub struct SubmittedEvent {
/// Database file
pub const DB_FILE: &str = "nostr.db";
/// How frequently to run maintenance
/// How many persisted events before DB maintenance is triggered.
pub const EVENT_MAINTENANCE_FREQ_SEC: u64 = 60;
/// How many persisted events before we pause for backups.
/// It isn't clear this is enough to make the online backup API work yet.
pub const EVENT_COUNT_BACKUP_PAUSE_TRIGGER: usize = 1000;
/// How frequently to attempt checkpointing
pub const CHECKPOINT_FREQ_SEC: u64 = 60;
/// Build a database connection pool.
/// # Panics
@@ -94,6 +91,16 @@ pub fn build_pool(
pool
}
/// Display database pool stats every 1 minute
pub async fn monitor_pool(name: &str, pool: SqlitePool) {
let sleep_dur = Duration::from_secs(60);
loop {
log_pool_stats(name, &pool);
tokio::time::sleep(sleep_dur).await;
}
}
/// Perform normal maintenance
pub fn optimize_db(conn: &mut PooledConnection) -> Result<()> {
let start = Instant::now();
@@ -102,15 +109,15 @@ pub fn optimize_db(conn: &mut PooledConnection) -> Result<()> {
Ok(())
}
#[derive(Debug)]
enum SqliteReturnStatus {
SqliteOk,
SqliteBusy,
SqliteError,
SqliteOther(u64),
enum SqliteStatus {
Ok,
Busy,
Error,
Other(u64),
}
/// Checkpoint/Truncate WAL
pub fn checkpoint_db(conn: &mut PooledConnection) -> Result<()> {
/// Checkpoint/Truncate WAL. Returns the number of WAL pages remaining.
pub fn checkpoint_db(conn: &mut PooledConnection) -> Result<usize> {
let query = "PRAGMA wal_checkpoint(TRUNCATE);";
let start = Instant::now();
let (cp_result, wal_size, _frames_checkpointed) = conn.query_row(query, [], |row| {
@@ -120,10 +127,10 @@ pub fn checkpoint_db(conn: &mut PooledConnection) -> Result<()> {
Ok((checkpoint_result, wal_size, frames_checkpointed))
})?;
let result = match cp_result {
0 => SqliteReturnStatus::SqliteOk,
1 => SqliteReturnStatus::SqliteBusy,
2 => SqliteReturnStatus::SqliteError,
x => SqliteReturnStatus::SqliteOther(x),
0 => SqliteStatus::Ok,
1 => SqliteStatus::Busy,
2 => SqliteStatus::Error,
x => SqliteStatus::Other(x),
};
info!(
"checkpoint ran in {:?} (result: {:?}, WAL size: {})",
@@ -131,7 +138,7 @@ pub fn checkpoint_db(conn: &mut PooledConnection) -> Result<()> {
result,
wal_size
);
Ok(())
Ok(wal_size as usize)
}
/// Spawn a database writer that persists events to the SQLite store.
@@ -173,10 +180,6 @@ pub async fn db_writer(
let rps_setting = settings.limits.messages_per_sec;
let mut most_recent_rate_limit = Instant::now();
let mut lim_opt = None;
// Constant writing has interfered with online backups. Keep
// track of how long since we've given the backups a chance to
// run.
let mut backup_pause_counter: usize = 0;
let clock = governor::clock::QuantaClock::default();
if let Some(rps) = rps_setting {
if rps > 0 {
@@ -207,8 +210,8 @@ pub async fn db_writer(
// TODO: incorporate delegated pubkeys
// if the event address is not in allowed_addrs.
if !allowed_addrs.contains(&event.pubkey) {
info!(
"Rejecting event {}, unauthorized author",
debug!(
"rejecting event: {}, unauthorized author",
event.get_event_id_prefix()
);
notice_tx
@@ -221,6 +224,25 @@ pub async fn db_writer(
}
}
// Check that event kind isn't blacklisted
let kinds_blacklist = &settings.limits.event_kind_blacklist.clone();
if let Some(event_kind_blacklist) = kinds_blacklist {
if event_kind_blacklist.contains(&event.kind) {
debug!(
"rejecting event: {}, blacklisted kind: {}",
&event.get_event_id_prefix(),
&event.kind
);
notice_tx
.try_send(Notice::blocked(
event.id,
"event kind is blocked by relay"
))
.ok();
continue;
}
}
// send any metadata events to the NIP-05 verifier
if nip05_active && event.is_kind_metadata() {
// we are sending this prior to even deciding if we
@@ -286,7 +308,6 @@ pub async fn db_writer(
);
event_write = true
} else {
log_pool_stats("writer", &pool);
match write_event(&mut pool.get()?, &event) {
Ok(updated) => {
if updated == 0 {
@@ -294,8 +315,9 @@ pub async fn db_writer(
notice_tx.try_send(Notice::duplicate(event.id)).ok();
} else {
info!(
"persisted event: {:?} from: {:?} in: {:?}",
"persisted event: {:?} (kind: {}) from: {:?} in: {:?}",
event.get_event_id_prefix(),
event.kind,
event.get_author_prefix(),
start.elapsed()
);
@@ -311,12 +333,6 @@ pub async fn db_writer(
notice_tx.try_send(Notice::error(event.id, msg)).ok();
}
}
backup_pause_counter += 1;
if backup_pause_counter > EVENT_COUNT_BACKUP_PAUSE_TRIGGER {
info!("pausing db write thread for a moment...");
thread::sleep(Duration::from_millis(500));
backup_pause_counter = 0
}
}
// use rate limit, if defined, and if an event was actually written.
@@ -349,6 +365,9 @@ pub async fn db_writer(
/// Persist an event to the database, returning rows added.
pub fn write_event(conn: &mut PooledConnection, e: &Event) -> Result<usize> {
// enable auto vacuum
conn.execute_batch("pragma auto_vacuum = FULL")?;
// start transaction
let tx = conn.transaction()?;
// get relevant fields from event and convert to blobs.
@@ -356,6 +375,15 @@ pub fn write_event(conn: &mut PooledConnection, e: &Event) -> Result<usize> {
let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok();
let delegator_blob: Option<Vec<u8>> = e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
let event_str = serde_json::to_string(&e).ok();
// check for replaceable events that would hide this one; we won't even attempt to insert these.
if e.is_replaceable() {
let repl_count = tx.query_row(
"SELECT e.id FROM event e INDEXED BY author_index WHERE e.author=? AND e.kind=? AND e.created_at > ? LIMIT 1;",
params![pubkey_blob, e.kind, e.created_at], |row| row.get::<usize, usize>(0));
if repl_count.ok().is_some() {
return Ok(0);
}
}
// ignore if the event hash is a duplicate.
let mut ins_count = tx.execute(
"INSERT OR IGNORE INTO event (event_hash, created_at, kind, author, delegated_by, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, ?6, strftime('%s','now'), FALSE);",
@@ -396,17 +424,19 @@ pub fn write_event(conn: &mut PooledConnection, e: &Event) -> Result<usize> {
}
}
}
// if this event is replaceable update, hide every other replaceable
// if this event is replaceable update, remove other replaceable
// event with the same kind from the same author that was issued
// earlier than this.
if e.kind == 0 || e.kind == 3 || (e.kind >= 10000 && e.kind < 20000) {
if e.is_replaceable() {
let author = hex::decode(&e.pubkey).ok();
// this is a backwards check - hide any events that were older.
let update_count = tx.execute(
"UPDATE event SET hidden=TRUE WHERE id!=? AND kind=? AND author=? AND created_at <= ? and hidden!=TRUE",
params![ev_id, e.kind, hex::decode(&e.pubkey).ok(), e.created_at],
"DELETE FROM event WHERE kind=? and author=? and id NOT IN (SELECT id FROM event INDEXED BY author_kind_index WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1)",
params![e.kind, author, e.kind, author],
)?;
if update_count > 0 {
info!(
"hid {} older replaceable kind {} events for author: {:?}",
"removed {} older replaceable kind {} events for author: {:?}",
update_count,
e.kind,
e.get_author_prefix()
@@ -481,8 +511,40 @@ fn repeat_vars(count: usize) -> String {
s
}
/// Create a dynamic SQL subquery and params from a subscription filter.
fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>) {
/// Decide if there is an index that should be used explicitly
fn override_index(f: &ReqFilter) -> Option<String> {
// queries for multiple kinds default to kind_index, which is
// significantly slower than kind_created_at_index.
if let Some(ks) = &f.kinds {
if f.ids.is_none() &&
ks.len() > 1 &&
f.since.is_none() &&
f.until.is_none() &&
f.tags.is_none() &&
f.authors.is_none() {
return Some("kind_created_at_index".into());
}
}
// if there is an author, it is much better to force the authors index.
if f.authors.is_some() {
if f.since.is_none() && f.until.is_none() {
if f.kinds.is_none() {
// with no use of kinds/created_at, just author
return Some("author_index".into());
} else {
// prefer author_kind if there are kinds
return Some("author_kind_index".into());
}
} else {
// finally, prefer author_created_at if time is provided
return Some("author_created_at_index".into());
}
}
None
}
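// Illustrative note (not part of this diff): with the overrides above, a
// filter such as {"authors":["abc"]} produces SQL along these lines (the
// values are hypothetical):
//
//   SELECT e.content, e.created_at FROM event e INDEXED BY author_index
//   WHERE (author>? AND author<?) ORDER BY e.created_at ASC
//
// SQLite treats INDEXED BY strictly: if the named index does not exist or
// cannot be used, the statement fails instead of falling back to a scan, so
// these names must match the schema's CREATE INDEX statements exactly.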
/// Create a dynamic SQL subquery and params from a subscription filter (and optional explicit index used)
fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<String>) {
// build a dynamic SQL query. all user-input is either an integer
// (sqli-safe), or a string that is filtered to only contain
// hexadecimal characters. Strings that require escaping (tag
@@ -493,10 +555,13 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>) {
let empty_query = "SELECT e.content, e.created_at FROM event e WHERE 1=0".to_owned();
// query parameters for SQLite
let empty_params: Vec<Box<dyn ToSql>> = vec![];
return (empty_query, empty_params);
return (empty_query, empty_params, None);
}
let mut query = "SELECT e.content, e.created_at FROM event e".to_owned();
// check if the index needs to be overridden
let idx_name = override_index(f);
let idx_stmt = idx_name.as_ref().map_or_else(|| "".to_owned(), |i| format!("INDEXED BY {}", i));
let mut query = format!("SELECT e.content, e.created_at FROM event e {}", idx_stmt);
// query parameters for SQLite
let mut params: Vec<Box<dyn ToSql>> = vec![];
@@ -509,22 +574,18 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>) {
for auth in authvec {
match hex_range(auth) {
Some(HexSearch::Exact(ex)) => {
auth_searches.push("author=? OR delegated_by=?".to_owned());
params.push(Box::new(ex.clone()));
auth_searches.push("author=?".to_owned());
params.push(Box::new(ex));
}
Some(HexSearch::Range(lower, upper)) => {
auth_searches.push(
"(author>? AND author<?) OR (delegated_by>? AND delegated_by<?)".to_owned(),
"(author>? AND author<?)".to_owned(),
);
params.push(Box::new(lower.clone()));
params.push(Box::new(upper.clone()));
params.push(Box::new(lower));
params.push(Box::new(upper));
}
Some(HexSearch::LowerOnly(lower)) => {
auth_searches.push("author>? OR delegated_by>?".to_owned());
params.push(Box::new(lower.clone()));
auth_searches.push("author>?".to_owned());
params.push(Box::new(lower));
}
None => {
@@ -533,13 +594,11 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>) {
}
}
if !authvec.is_empty() {
let authors_clause = format!("({})", auth_searches.join(" OR "));
filter_components.push(authors_clause);
let auth_clause = format!("({})", auth_searches.join(" OR "));
filter_components.push(auth_clause);
} else {
// if the authors list was empty, we should never return
// any results.
filter_components.push("false".to_owned());
}
filter_components.push("false".to_owned());
}
}
// Query for Kind
if let Some(ks) = &f.kinds {
@@ -636,19 +695,23 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>) {
} else {
query.push_str(" ORDER BY e.created_at ASC")
}
(query, params)
(query, params, idx_name)
}
/// Create a dynamic SQL query string and params from a subscription.
fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>, Vec<String>) {
// build a dynamic SQL query for an entire subscription, based on
// SQL subqueries for filters.
let mut subqueries: Vec<String> = Vec::new();
let mut indexes = vec![];
// subquery params
let mut params: Vec<Box<dyn ToSql>> = vec![];
// for every filter in the subscription, generate a subquery
for f in sub.filters.iter() {
let (f_subquery, mut f_params) = query_from_filter(f);
let (f_subquery, mut f_params, index) = query_from_filter(f);
if let Some(i) = index {
indexes.push(i);
}
subqueries.push(f_subquery);
params.append(&mut f_params);
}
@@ -658,7 +721,7 @@ fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
.map(|s| format!("SELECT distinct content, created_at FROM ({})", s))
.collect();
let query: String = subqueries_selects.join(" UNION ");
(query, params)
(query, params, indexes)
}
/// Check if the pool is fully utilized
@@ -671,35 +734,70 @@ fn _pool_at_capacity(pool: &SqlitePool) -> bool {
fn log_pool_stats(name: &str, pool: &SqlitePool) {
let state: r2d2::State = pool.state();
let in_use_cxns = state.connections - state.idle_connections;
trace!(
"DB pool {:?} usage (in_use: {}, available: {})",
debug!(
"DB pool {:?} usage (in_use: {}, available: {}, max: {})",
name,
in_use_cxns,
state.connections
state.connections,
pool.max_size()
);
if state.connections == in_use_cxns {
debug!("DB pool {:?} is empty (in_use: {})", name, in_use_cxns);
}
}
/// Perform database maintenance on a regular basis
pub async fn db_maintenance(pool: SqlitePool) {
pub async fn db_optimize_task(pool: SqlitePool) {
tokio::task::spawn(async move {
loop {
tokio::select! {
_ = tokio::time::sleep(Duration::from_secs(EVENT_MAINTENANCE_FREQ_SEC)) => {
if let Ok(mut conn) = pool.get() {
// the busy timer will block writers, so don't set
// this any higher than you want max latency for event
// writes.
conn.busy_timeout(Duration::from_secs(1)).ok();
debug!("running database optimizer");
optimize_db(&mut conn).ok();
debug!("running wal_checkpoint(TRUNCATE)");
checkpoint_db(&mut conn).ok();
}
}
};
_ = tokio::time::sleep(Duration::from_secs(60 * 60)) => {
if let Ok(mut conn) = pool.get() {
// the busy timer will block writers, so don't set
// this any higher than you want max latency for event
// writes.
info!("running database optimizer");
optimize_db(&mut conn).ok();
}
}
};
}
});
}
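// Sketch (assumption, not part of this diff) of an optimize helper consistent
// with the task above; the real optimize_db is defined elsewhere in db.rs:
//
// fn optimize_db(conn: &mut PooledConnection) -> Result<()> {
//     // ask SQLite to refresh query-planner statistics
//     conn.execute_batch("PRAGMA optimize;")?;
//     Ok(())
// }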
/// Perform database WAL checkpoint on a regular basis
pub async fn db_checkpoint_task(pool: SqlitePool, safe_to_read: Arc<Mutex<u64>>) {
tokio::task::spawn(async move {
// WAL size in pages.
let mut current_wal_size = 0;
// WAL threshold for more aggressive checkpointing (10,000 pages, or about 40MB)
let wal_threshold = 1000 * 10;
// default threshold for the busy timer
let busy_wait_default = Duration::from_secs(1);
// if the WAL file is getting too big, switch to this
let busy_wait_default_long = Duration::from_secs(10);
loop {
tokio::select! {
_ = tokio::time::sleep(Duration::from_secs(CHECKPOINT_FREQ_SEC)) => {
if let Ok(mut conn) = pool.get() {
let mut _guard: Option<MutexGuard<u64>> = None;
// the busy timer will block writers, so don't set
// this any higher than you want max latency for event
// writes.
if current_wal_size <= wal_threshold {
conn.busy_timeout(busy_wait_default).ok();
} else {
// if the wal size has exceeded a threshold, increase the busy timeout.
conn.busy_timeout(busy_wait_default_long).ok();
// take a lock that will prevent new readers.
info!("blocking new readers to perform wal_checkpoint");
_guard = Some(safe_to_read.lock().await);
}
debug!("running wal_checkpoint(TRUNCATE)");
if let Ok(new_size) = checkpoint_db(&mut conn) {
current_wal_size = new_size;
}
}
}
};
}
});
}
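// Sketch (assumption, not part of this diff) of a checkpoint helper matching
// the call above. "PRAGMA wal_checkpoint(TRUNCATE)" reports (busy, wal-pages,
// pages-checkpointed); returning the WAL page count lets the task adapt its
// busy timeout:
//
// fn checkpoint_db(conn: &mut PooledConnection) -> Result<usize> {
//     let (busy, wal_pages, moved): (i64, i64, i64) = conn.query_row(
//         "PRAGMA wal_checkpoint(TRUNCATE);",
//         [],
//         |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?)),
//     )?;
//     debug!("checkpoint result: busy={}, wal_pages={}, moved={}", busy, wal_pages, moved);
//     Ok(wal_pages as usize)
// }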
@@ -716,14 +814,19 @@ pub async fn db_query(
pool: SqlitePool,
query_tx: tokio::sync::mpsc::Sender<QueryResult>,
mut abandon_query_rx: tokio::sync::oneshot::Receiver<()>,
safe_to_read: Arc<Mutex<u64>>,
) {
let pre_spawn_start = Instant::now();
task::spawn_blocking(move || {
{
// if we are waiting on a checkpoint, stop until it is complete
let _ = safe_to_read.blocking_lock();
}
let db_queue_time = pre_spawn_start.elapsed();
// if the queue time was very long (>5 seconds), spare the DB and abort.
if db_queue_time > Duration::from_secs(5) {
info!(
"shedding DB query load from {:?} (cid: {}, sub: {:?})",
"shedding DB query load queued for {:?} (cid: {}, sub: {:?})",
db_queue_time, client_id, sub.id
);
return Ok(());
@@ -738,13 +841,12 @@ pub async fn db_query(
let start = Instant::now();
let mut row_count: usize = 0;
// generate SQL query
let (q, p) = query_from_sub(&sub);
let (q, p, idxs) = query_from_sub(&sub);
let sql_gen_elapsed = start.elapsed();
if sql_gen_elapsed > Duration::from_millis(10) {
debug!("SQL (slow) generated in {:?}", start.elapsed());
}
// show pool stats
log_pool_stats("reader", &pool);
// cutoff for displaying slow queries
let slow_cutoff = Duration::from_millis(2000);
// any client that doesn't cause us to generate new rows in 5
@@ -753,46 +855,43 @@ pub async fn db_query(
let start = Instant::now();
let mut slow_first_event;
let mut last_successful_send = Instant::now();
if let Ok(conn) = pool.get() {
// execute the query. Don't cache, since queries vary so much.
let mut stmt = conn.prepare(&q)?;
if let Ok(mut conn) = pool.get() {
// execute the query.
// make the actual SQL query (with parameters inserted) available
conn.trace(Some(|x| {trace!("SQL trace: {:?}", x)}));
let mut stmt = conn.prepare_cached(&q)?;
let mut event_rows = stmt.query(rusqlite::params_from_iter(p))?;
let mut first_result = true;
while let Some(row) = event_rows.next()? {
let first_event_elapsed = start.elapsed();
slow_first_event = first_event_elapsed >= slow_cutoff;
if first_result {
debug!(
"first result in {:?} (cid: {}, sub: {:?})",
first_event_elapsed, client_id, sub.id
"first result in {:?} (cid: {}, sub: {:?}) [used indexes: {:?}]",
first_event_elapsed, client_id, sub.id, idxs
);
first_result = false;
}
// logging for slow queries; show sub and SQL.
// to reduce logging, only show 1/16th of clients (leading 0)
if row_count == 0 && slow_first_event && client_id.starts_with("0") {
if row_count == 0 && slow_first_event && client_id.starts_with('0') {
debug!(
"query req (slow): {:?} (cid: {}, sub: {:?})",
sub, client_id, sub.id
);
debug!(
"query string (slow): {} (cid: {}, sub: {:?})",
q, client_id, sub.id
);
} else {
trace!(
"query req: {:?} (cid: {}, sub: {:?})",
sub,
client_id,
sub.id
);
trace!(
"query string: {} (cid: {}, sub: {:?})",
q,
client_id,
sub.id
);
}
}
// check if a checkpoint is trying to run, and abort
if row_count % 100 == 0 {
{
if safe_to_read.try_lock().is_err() {
// lock was held, abort this query
debug!("query aborted due to checkpoint (cid: {}, sub: {:?})", client_id, sub.id);
return Ok(());
}
}
}
// check if this is still active; every 100 rows
if row_count % 100 == 0 && abandon_query_rx.try_recv().is_ok() {
debug!("query aborted (cid: {}, sub: {:?})", client_id, sub.id);
@@ -809,10 +908,17 @@ pub async fn db_query(
trace!("db reader thread is stalled");
if last_successful_send + abort_cutoff < Instant::now() {
// the queue has been full for too long, abort
info!("aborting database query due to slow client");
info!("aborting database query due to slow client (cid: {}, sub: {:?})",
client_id, sub.id);
let ok: Result<()> = Ok(());
return ok;
}
// check if a checkpoint is trying to run, and abort
if safe_to_read.try_lock().is_err() {
// lock was held, abort this query
debug!("query aborted due to checkpoint (cid: {}, sub: {:?})", client_id, sub.id);
return Ok(());
}
// give the queue a chance to clear before trying again
thread::sleep(Duration::from_millis(100));
}
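// Note on the safe_to_read gate used above: db_checkpoint_task takes this
// tokio::sync::Mutex before truncating a large WAL, while readers only ever
// try_lock() it (at 100-row boundaries and while stalled) and abort instead
// of blocking. A held lock therefore means "checkpoint in progress"; the u64
// inside is just a placeholder value.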


@@ -80,7 +80,7 @@ impl FromStr for Operator {
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct ConditionQuery {
pub(crate) conditions: Vec<Condition>,
pub conditions: Vec<Condition>,
}
impl ConditionQuery {
@@ -137,9 +137,9 @@ pub fn validate_delegation(
/// An example complex condition would be: kind=1,2,3&created_at<1665265999
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct Condition {
pub(crate) field: Field,
pub(crate) operator: Operator,
pub(crate) values: Vec<u64>,
pub field: Field,
pub operator: Operator,
pub values: Vec<u64>,
}
impl Condition {
@@ -332,19 +332,6 @@ mod tests {
assert_eq!(parsed, cq);
Ok(())
}
fn simple_event() -> Event {
Event {
id: "0".to_owned(),
pubkey: "0".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: vec![],
content: "".to_owned(),
sig: "0".to_owned(),
tagidx: None,
}
}
// Check for condition logic on event w/ empty values
#[test]
fn condition_with_empty_values() {
@@ -353,7 +340,7 @@ mod tests {
operator: Operator::GreaterThan,
values: vec![],
};
let e = simple_event();
let e = Event::simple_event();
assert!(!c.allows_event(&e));
c.operator = Operator::LessThan;
assert!(!c.allows_event(&e));
@@ -373,7 +360,7 @@ mod tests {
operator: Operator::GreaterThan,
values: vec![10],
};
let mut e = simple_event();
let mut e = Event::simple_event();
// kind is not greater than 10, not allowed
e.kind = 1;
assert!(!c.allows_event(&e));
@@ -392,7 +379,7 @@ mod tests {
operator: Operator::Equals,
values: vec![0, 10, 20],
};
let mut e = simple_event();
let mut e = Event::simple_event();
// Allow if event kind is in list for Equals
e.kind = 10;
assert!(c.allows_event(&e));


@@ -37,19 +37,19 @@ impl EventCmd {
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct Event {
pub id: String,
pub(crate) pubkey: String,
pub pubkey: String,
#[serde(skip)]
pub(crate) delegated_by: Option<String>,
pub(crate) created_at: u64,
pub(crate) kind: u64,
pub delegated_by: Option<String>,
pub created_at: u64,
pub kind: u64,
#[serde(deserialize_with = "tag_from_string")]
// NOTE: array-of-arrays may need to be more general than a string container
pub(crate) tags: Vec<Vec<String>>,
pub(crate) content: String,
pub(crate) sig: String,
pub tags: Vec<Vec<String>>,
pub content: String,
pub sig: String,
// Optimization for tag search, built on demand.
#[serde(skip)]
pub(crate) tagidx: Option<HashMap<char, HashSet<String>>>,
pub tagidx: Option<HashMap<char, HashSet<String>>>,
}
/// Simple tag type for array of array of strings.
@@ -101,10 +101,30 @@ impl From<EventCmd> for Result<Event> {
}
impl Event {
#[cfg(test)]
pub fn simple_event() -> Event {
Event {
id: "0".to_owned(),
pubkey: "0".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: vec![],
content: "".to_owned(),
sig: "0".to_owned(),
tagidx: None,
}
}
pub fn is_kind_metadata(&self) -> bool {
self.kind == 0
}
/// Should this event be replaced with newer timestamps from the same author?
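/// (kind 0 metadata, kind 3 contact lists, kind 41 channel metadata, and the
/// NIP-16 replaceable range 10000..20000)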
pub fn is_replaceable(&self) -> bool {
self.kind == 0 || self.kind == 3 || self.kind == 41 || (self.kind >= 10000 && self.kind < 20000)
}
/// Pull a NIP-05 Name out of the event, if one exists
pub fn get_nip05_addr(&self) -> Option<nip05::Nip05Name> {
if self.is_kind_metadata() {
@@ -161,11 +181,11 @@ impl Event {
}
/// Update delegation status
fn update_delegation(&mut self) {
pub fn update_delegation(&mut self) {
self.delegated_by = self.delegated_author();
}
/// Build an event tag index
fn build_index(&mut self) {
pub fn build_index(&mut self) {
// if there are no tags; just leave the index as None
if self.tags.is_empty() {
return;
@@ -226,7 +246,7 @@ impl Event {
}
/// Check if this event has a valid signature.
fn validate(&self) -> Result<()> {
pub fn validate(&self) -> Result<()> {
// TODO: return a Result with a reason for invalid events
// validation is performed by:
// * parsing JSON string into event fields
@@ -319,31 +339,18 @@ impl Event {
#[cfg(test)]
mod tests {
use super::*;
fn simple_event() -> Event {
Event {
id: "0".to_owned(),
pubkey: "0".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: vec![],
content: "".to_owned(),
sig: "0".to_owned(),
tagidx: None,
}
}
#[test]
fn event_creation() {
// create an event
let event = simple_event();
let event = Event::simple_event();
assert_eq!(event.id, "0");
}
#[test]
fn event_serialize() -> Result<()> {
// serialize an event to JSON string
let event = simple_event();
let event = Event::simple_event();
let j = serde_json::to_string(&event)?;
assert_eq!(j, "{\"id\":\"0\",\"pubkey\":\"0\",\"created_at\":0,\"kind\":0,\"tags\":[],\"content\":\"\",\"sig\":\"0\"}");
Ok(())
@@ -351,14 +358,14 @@ mod tests {
#[test]
fn empty_event_tag_match() {
let event = simple_event();
let event = Event::simple_event();
assert!(!event
.generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
}
#[test]
fn single_event_tag_match() {
let mut event = simple_event();
let mut event = Event::simple_event();
event.tags = vec![vec!["e".to_owned(), "foo".to_owned()]];
event.build_index();
assert_eq!(
@@ -373,7 +380,7 @@ mod tests {
#[test]
fn event_tags_serialize() -> Result<()> {
// serialize an event with tags to JSON string
let mut event = simple_event();
let mut event = Event::simple_event();
event.tags = vec![
vec![
"e".to_owned(),
@@ -497,4 +504,17 @@ mod tests {
let expected = Some(expected_json.to_owned());
assert_eq!(c, expected);
}
#[test]
fn replaceable_event() {
let mut event = Event::simple_event();
event.kind = 0;
assert!(event.is_replaceable());
event.kind = 3;
assert!(event.is_replaceable());
event.kind = 12000;
assert!(event.is_replaceable());
}
}


@@ -35,7 +35,7 @@ impl From<config::Info> for RelayInfo {
description: i.description,
pubkey: i.pubkey,
contact: i.contact,
supported_nips: Some(vec![1, 2, 9, 11, 12, 15, 16, 20, 22, 26]),
supported_nips: Some(vec![1, 2, 9, 11, 12, 15, 16, 20, 22]),
software: Some("https://git.sr.ht/~gheartsfield/nostr-rs-relay".to_owned()),
version: CARGO_PKG_VERSION.map(|x| x.to_owned()),
}


@@ -1,3 +1,4 @@
pub mod cli;
pub mod close;
pub mod config;
pub mod conn;


@@ -1,42 +1,41 @@
//! Server process
use clap::Parser;
use nostr_rs_relay::cli::*;
use nostr_rs_relay::config;
use nostr_rs_relay::server::start_server;
use std::env;
use std::sync::mpsc as syncmpsc;
use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
use std::thread;
use tracing::info;
use console_subscriber::ConsoleLayer;
/// Return a requested DB name from command line arguments.
fn db_from_args(args: &[String]) -> Option<String> {
if args.len() == 3 && args.get(1) == Some(&"--db".to_owned()) {
return args.get(2).map(std::clone::Clone::clone);
}
None
}
/// Start running a Nostr relay server.
fn main() {
// setup tracing
let _trace_sub = tracing_subscriber::fmt::try_init();
info!("Starting up from main");
// get database directory from args
let args: Vec<String> = env::args().collect();
let db_dir: Option<String> = db_from_args(&args);
// configure settings from config.toml
// replace default settings with those read from config.toml
let mut settings = config::Settings::new();
// setup tracing
if settings.diagnostics.tracing {
// enable tracing with tokio-console
ConsoleLayer::builder().with_default_env().init();
} else {
// standard logging
tracing_subscriber::fmt::try_init().unwrap();
}
info!("Starting up from main");
let args = CLIArgs::parse();
// get database directory from args
let db_dir = args.db;
// update with database location
if let Some(db) = db_dir {
settings.database.data_directory = db;
if !db_dir.is_empty() {
settings.database.data_directory = db_dir;
}
let (_, ctrl_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();


@@ -13,21 +13,22 @@ use tracing::{debug, error, info};
/// Startup DB Pragmas
pub const STARTUP_SQL: &str = r##"
PRAGMA main.synchronous=NORMAL;
PRAGMA main.synchronous = NORMAL;
PRAGMA foreign_keys = ON;
PRAGMA journal_size_limit=32768;
PRAGMA journal_size_limit = 32768;
pragma mmap_size = 17179869184; -- cap mmap at 16GB
"##;
/// Latest database version
pub const DB_VERSION: usize = 11;
pub const DB_VERSION: usize = 15;
/// Schema definition
const INIT_SQL: &str = formatcp!(
r##"
-- Database settings
PRAGMA encoding = "UTF-8";
PRAGMA journal_mode=WAL;
PRAGMA journal_mode = WAL;
PRAGMA auto_vacuum = FULL;
PRAGMA main.synchronous=NORMAL;
PRAGMA foreign_keys = ON;
PRAGMA application_id = 1654008667;
@@ -49,9 +50,14 @@ content TEXT NOT NULL -- serialized json of event object
-- Event Indexes
CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash);
CREATE INDEX IF NOT EXISTS author_index ON event(author);
CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
CREATE INDEX IF NOT EXISTS kind_author_index ON event(kind,author);
CREATE INDEX IF NOT EXISTS kind_created_at_index ON event(kind,created_at);
CREATE INDEX IF NOT EXISTS author_created_at_index ON event(author,created_at);
CREATE INDEX IF NOT EXISTS author_kind_index ON event(author,kind);
-- Tag Table
-- Tag values are stored as either a BLOB (if they come in as a
@@ -93,6 +99,20 @@ pub fn curr_db_version(conn: &mut Connection) -> Result<usize> {
Ok(curr_version)
}
/// Determine event count
pub fn db_event_count(conn: &mut Connection) -> Result<usize> {
let query = "SELECT count(*) FROM event;";
let count = conn.query_row(query, [], |row| row.get(0))?;
Ok(count)
}
/// Determine tag count
pub fn db_tag_count(conn: &mut Connection) -> Result<usize> {
let query = "SELECT count(*) FROM tag;";
let count = conn.query_row(query, [], |row| row.get(0))?;
Ok(count)
}
fn mig_init(conn: &mut PooledConnection) -> Result<usize> {
match conn.execute_batch(INIT_SQL) {
Ok(()) => {
@@ -140,19 +160,15 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
if curr_version == 1 {
curr_version = mig_1_to_2(conn)?;
}
if curr_version == 2 {
curr_version = mig_2_to_3(conn)?;
}
if curr_version == 3 {
curr_version = mig_3_to_4(conn)?;
}
if curr_version == 4 {
curr_version = mig_4_to_5(conn)?;
}
if curr_version == 5 {
curr_version = mig_5_to_6(conn)?;
}
@@ -171,6 +187,18 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
if curr_version == 10 {
curr_version = mig_10_to_11(conn)?;
}
if curr_version == 11 {
curr_version = mig_11_to_12(conn)?;
}
if curr_version == 12 {
curr_version = mig_12_to_13(conn)?;
}
if curr_version == 13 {
curr_version = mig_13_to_14(conn)?;
}
if curr_version == 14 {
curr_version = mig_14_to_15(conn)?;
}
if curr_version == DB_VERSION {
info!(
@@ -198,6 +226,62 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
Ok(())
}
pub fn rebuild_tags(conn: &mut PooledConnection) -> Result<()> {
// Check how many events we have to process
let count = db_event_count(conn)?;
let update_each_percent = 0.05;
let mut percent_done = 0.0;
let mut events_processed = 0;
let start = Instant::now();
let tx = conn.transaction()?;
{
// Clear out table
tx.execute("DELETE FROM tag;", [])?;
let mut stmt = tx.prepare("select id, content from event order by id;")?;
let mut tag_rows = stmt.query([])?;
while let Some(row) = tag_rows.next()? {
if (events_processed as f32) / (count as f32) > percent_done {
info!("Tag update {}% complete...", (100.0 * percent_done).round());
percent_done += update_each_percent;
}
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
let event_id: u64 = row.get(0)?;
let event_json: String = row.get(1)?;
let event: Event = serde_json::from_str(&event_json)?;
// look at each event, and each tag, creating new tag entries if appropriate.
for t in event.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.get(0).unwrap();
let tagnamechar_opt = single_char_tagname(tagname);
if tagnamechar_opt.is_none() {
continue;
}
// safe because len was > 1
let tagval = t.get(1).unwrap();
// insert as BLOB if we can restore it losslessly.
// this means it needs to be even length and lowercase.
if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
tx.execute(
"INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
params![event_id, tagname, hex::decode(tagval).ok()],
)?;
} else {
// otherwise, insert as text
tx.execute(
"INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
params![event_id, tagname, &tagval],
)?;
}
}
events_processed += 1;
}
}
tx.commit()?;
info!("rebuilt tags in {:?}", start.elapsed());
Ok(())
}
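// Example of the round-trip rule above (illustrative values): a tag value of
// "1a2b" (even-length, lowercase hex) is stored in value_hex as the BLOB
// [0x1a, 0x2b], while "wss://relay.example.com" fails the hex test and is
// stored verbatim in the value column.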
//// Migration Scripts
fn mig_1_to_2(conn: &mut PooledConnection) -> Result<usize> {
@@ -329,7 +413,6 @@ fn mig_5_to_6(conn: &mut PooledConnection) -> Result<usize> {
let mut stmt = tx.prepare("select id, content from event order by id;")?;
let mut tag_rows = stmt.query([])?;
while let Some(row) = tag_rows.next()? {
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
let event_id: u64 = row.get(0)?;
let event_json: String = row.get(1)?;
let event: Event = serde_json::from_str(&event_json)?;
@@ -371,7 +454,6 @@ fn mig_5_to_6(conn: &mut PooledConnection) -> Result<usize> {
fn mig_6_to_7(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 6->7");
// only change is adding a delegated_by column to events.
let upgrade_sql = r##"
ALTER TABLE event ADD delegated_by BLOB;
CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
@@ -469,3 +551,103 @@ PRAGMA user_version = 11;
}
Ok(11)
}
fn mig_11_to_12(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 11->12");
let start = Instant::now();
let tx = conn.transaction()?;
{
// Lookup every replaceable event
let mut stmt = tx.prepare("select kind,author from event where kind in (0,3,41) or (kind>=10000 and kind<20000) order by id;")?;
let mut replaceable_rows = stmt.query([])?;
info!("updating replaceable events; this could take awhile...");
while let Some(row) = replaceable_rows.next()? {
// capture the kind and author of each replaceable event.
let event_kind: u64 = row.get(0)?;
let event_author: Vec<u8> = row.get(1)?;
tx.execute(
"UPDATE event SET hidden=TRUE WHERE hidden!=TRUE and kind=? and author=? and id NOT IN (SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1)",
params![event_kind, event_author, event_kind, event_author],
)?;
}
tx.execute("PRAGMA user_version = 12;", [])?;
}
tx.commit()?;
info!("database schema upgraded v11 -> v12 in {:?}", start.elapsed());
// vacuum after large table modification
let start = Instant::now();
conn.execute("VACUUM;", [])?;
info!("vacuumed DB after hidden event cleanup in {:?}", start.elapsed());
Ok(12)
}
fn mig_12_to_13(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 12->13");
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS kind_author_index ON event(kind,author);
reindex;
pragma optimize;
PRAGMA user_version = 13;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v12 -> v13");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(13)
}
fn mig_13_to_14(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 13->14");
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
CREATE INDEX IF NOT EXISTS kind_created_at_index ON event(kind,created_at);
pragma optimize;
PRAGMA user_version = 14;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v13 -> v14");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(14)
}
fn mig_14_to_15(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 14->15");
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS author_created_at_index ON event(author,created_at);
CREATE INDEX IF NOT EXISTS author_kind_index ON event(author,kind);
PRAGMA user_version = 15;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v14 -> v15");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
// clear out hidden events
let clear_hidden_sql = r##"DELETE FROM event WHERE HIDDEN=true;"##;
info!("removing hidden events; this may take awhile...");
match conn.execute_batch(clear_hidden_sql) {
Ok(()) => {
info!("all hidden events removed");
},
Err(err) => {
error!("delete failed: {}", err);
panic!("could not remove hidden events");
}
}
Ok(15)
}
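// Note (editorial, based on this diff): after v15, write_event deletes older
// replaceable events outright instead of setting hidden=TRUE, and refuses
// inserts that would be immediately hidden, so the hidden rows removed above
// should not reappear.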


@@ -25,10 +25,13 @@ use hyper::{
use rusqlite::OpenFlags;
use serde::{Deserialize, Serialize};
use serde_json::json;
use tokio::sync::Mutex;
use std::collections::HashMap;
use std::convert::Infallible;
use std::net::SocketAddr;
use std::path::Path;
use std::sync::Arc;
use std::sync::atomic::Ordering;
use std::sync::mpsc::Receiver as MpscReceiver;
use std::time::Duration;
use std::time::Instant;
@@ -53,6 +56,7 @@ async fn handle_web_request(
broadcast: Sender<Event>,
event_tx: tokio::sync::mpsc::Sender<SubmittedEvent>,
shutdown: Receiver<()>,
safe_to_read: Arc<Mutex<u64>>,
) -> Result<Response<Body>, Infallible> {
match (
request.uri().path(),
@@ -75,6 +79,7 @@ async fn handle_web_request(
Ok(upgraded) => {
// set WebSocket configuration options
let config = WebSocketConfig {
max_send_queue: Some(1024),
max_message_size: settings.limits.max_ws_message_bytes,
max_frame_size: settings.limits.max_ws_frame_bytes,
..Default::default()
@@ -88,6 +93,7 @@ async fn handle_web_request(
Some(config),
)
.await;
let origin = get_header_string("origin", request.headers());
let user_agent = get_header_string("user-agent", request.headers());
// determine the remote IP from headers if they exist
let header_ip = settings
@@ -101,6 +107,7 @@ async fn handle_web_request(
let client_info = ClientInfo {
remote_ip,
user_agent,
origin,
};
// spawn a nostr server with our websocket
tokio::spawn(nostr_server(
@@ -111,6 +118,7 @@ async fn handle_web_request(
broadcast,
event_tx,
shutdown,
safe_to_read,
));
}
// todo: trace, don't print...
@@ -245,14 +253,19 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
// configure tokio runtime
let rt = Builder::new_multi_thread()
.enable_all()
.thread_name("tokio-ws")
.thread_name_fn(|| {
// give each thread a unique numeric name
static ATOMIC_ID: std::sync::atomic::AtomicUsize = std::sync::atomic::AtomicUsize::new(0);
let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst);
format!("tokio-ws-{}", id)
})
// limit concurrent SQLite blocking threads
.max_blocking_threads(settings.limits.max_blocking_threads)
.on_thread_start(|| {
trace!("started new thread");
trace!("started new thread: {:?}", std::thread::current().name());
})
.on_thread_stop(|| {
trace!("stopping thread");
trace!("stopped thread: {:?}", std::thread::current().name());
})
.build()
.unwrap();
@@ -317,10 +330,16 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
&settings,
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
1,
1,
2,
false,
);
db::db_maintenance(maintenance_pool).await;
// Create a mutex that will block readers, so that a
// checkpoint can be performed quickly.
let safe_to_read = Arc::new(Mutex::new(0));
db::db_optimize_task(maintenance_pool.clone()).await;
db::db_checkpoint_task(maintenance_pool, safe_to_read.clone()).await;
// listen for (external to tokio) shutdown request
let controlled_shutdown = invoke_shutdown.clone();
@@ -356,6 +375,10 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
db_max_conn,
true,
);
// spawn a task to check the pool size.
let pool_monitor = pool.clone();
tokio::spawn(async move {db::monitor_pool("reader", pool_monitor).await;});
// A `Service` is needed for every connection, so this
// creates one from our `handle_request` function.
let make_svc = make_service_fn(|conn: &AddrStream| {
@@ -365,6 +388,7 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
let event = event_tx.clone();
let stop = invoke_shutdown.clone();
let settings = settings.clone();
let safe_to_read = safe_to_read.clone();
async move {
// service_fn converts our function into a `Service`
Ok::<_, Infallible>(service_fn(move |request: Request<Body>| {
@@ -376,6 +400,7 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
bcast.clone(),
event.clone(),
stop.subscribe(),
safe_to_read.clone(),
)
}))
}
@@ -408,6 +433,10 @@ fn convert_to_msg(msg: String, max_bytes: Option<usize>) -> Result<NostrMessage>
let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
match parsed_res {
Ok(m) => {
if let NostrMessage::SubMsg(_) = m {
// note: this only prints the first 16k of a REQ and then truncates.
trace!("REQ: {:?}", msg);
};
if let NostrMessage::EventMsg(_) = m {
if let Some(max_size) = max_bytes {
// check length, ensure that some max size is set.
@@ -419,8 +448,8 @@ fn convert_to_msg(msg: String, max_bytes: Option<usize>) -> Result<NostrMessage>
Ok(m)
}
Err(e) => {
debug!("proto parse error: {:?}", e);
debug!("parse error on message: {}", msg.trim());
trace!("proto parse error: {:?}", e);
trace!("parse error on message: {:?}", msg.trim());
Err(Error::ProtoParseError)
}
}
@@ -439,6 +468,7 @@ fn make_notice_message(notice: Notice) -> Message {
struct ClientInfo {
remote_ip: String,
user_agent: Option<String>,
origin: Option<String>,
}
/// Handle new client connections. This runs through an event loop
@@ -451,6 +481,7 @@ async fn nostr_server(
broadcast: Sender<Event>,
event_tx: mpsc::Sender<SubmittedEvent>,
mut shutdown: Receiver<()>,
safe_to_read: Arc<Mutex<u64>>,
) {
// the time this websocket nostr server started
let orig_start = Instant::now();
@@ -502,9 +533,14 @@ async fn nostr_server(
let mut client_published_event_count: usize = 0;
let mut client_received_event_count: usize = 0;
debug!("new client connection (cid: {}, ip: {:?})", cid, conn.ip());
if let Some(ua) = client_info.user_agent {
debug!("cid: {}, user-agent: {:?}", cid, ua);
}
let origin = client_info.origin.unwrap_or_else(|| "<unspecified>".into());
let user_agent = client_info
.user_agent
.unwrap_or_else(|| "<unspecified>".into());
debug!(
"cid: {}, origin: {:?}, user-agent: {:?}",
cid, origin, user_agent
);
loop {
tokio::select! {
_ = shutdown.recv() => {
@@ -655,8 +691,10 @@ async fn nostr_server(
if let Some(previous_query) = running_queries.insert(s.id.to_owned(), abandon_query_tx) {
previous_query.send(()).ok();
}
if s.needs_historical_events() {
// start a database query. this spawns a blocking database query on a worker thread.
db::db_query(s, cid.to_owned(), pool.clone(), query_tx.clone(), abandon_query_rx).await;
db::db_query(s, cid.to_owned(), pool.clone(), query_tx.clone(), abandon_query_rx, safe_to_read.clone()).await;
}
},
Err(e) => {
info!("Subscription error: {} (cid: {}, sub: {:?})", e, cid, s.id);


@@ -65,12 +65,21 @@ impl<'de> Deserialize<'de> for ReqFilter {
tags: None,
force_no_match: false,
};
let empty_string = "".into();
let mut ts = None;
// iterate through each key, and assign values that exist
for (key, val) in filter.into_iter() {
// ids
if key == "ids" {
rf.ids = Deserialize::deserialize(val).ok();
let raw_ids: Option<Vec<String>> = Deserialize::deserialize(val).ok();
if let Some(a) = raw_ids.as_ref() {
if a.contains(&empty_string) {
return Err(serde::de::Error::invalid_type(
Unexpected::Other("prefix matches must not be empty strings"),
&"a json object"));
}
}
rf.ids = raw_ids;
} else if key == "kinds" {
rf.kinds = Deserialize::deserialize(val).ok();
} else if key == "since" {
@@ -80,7 +89,15 @@ impl<'de> Deserialize<'de> for ReqFilter {
} else if key == "limit" {
rf.limit = Deserialize::deserialize(val).ok();
} else if key == "authors" {
rf.authors = Deserialize::deserialize(val).ok();
let raw_authors: Option<Vec<String>> = Deserialize::deserialize(val).ok();
if let Some(a) = raw_authors.as_ref() {
if a.contains(&empty_string) {
return Err(serde::de::Error::invalid_type(
Unexpected::Other("prefix matches must not be empty strings"),
&"a json object"));
}
}
rf.authors = raw_authors;
} else if key.starts_with('#') && key.len() > 1 && val.is_array() {
if let Some(tag_search) = tag_search_char_from_filter(key) {
if ts.is_none() {
@@ -183,6 +200,13 @@ impl Subscription {
pub fn get_id(&self) -> String {
self.id.clone()
}
/// Determine if any filter is requesting historical (database)
/// queries. If every filter has limit:0, we do not need to query the DB.
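/// For example, ["REQ","sub-id",{"limit":0}] subscribes to new events only.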
pub fn needs_historical_events(&self) -> bool {
self.filters.iter().any(|f| f.limit != Some(0))
}
/// Determine if this subscription matches a given [`Event`]. Any
/// individual filter match is sufficient.
pub fn interested_in_event(&self, event: &Event) -> bool {
@@ -294,6 +318,24 @@ mod tests {
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}
#[test]
fn req_empty_authors_prefix() {
let raw_json = "[\"REQ\",\"some-id\",{\"authors\": [\"\"]}]";
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}
#[test]
fn req_empty_ids_prefix() {
let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\"]}]";
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}
#[test]
fn req_empty_ids_prefix_mixed() {
let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\",\"aaa\"]}]";
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}
#[test]
fn legacy_filter() {
// legacy field in filter

tests/cli.rs

@@ -0,0 +1,10 @@
#[cfg(test)]
mod tests {
use nostr_rs_relay::cli::CLIArgs;
#[test]
fn cli_tests() {
use clap::CommandFactory;
CLIArgs::command().debug_assert();
}
}