Compare commits

...

70 Commits

Author SHA1 Message Date
Greg Heartsfield
95748647f0 build: bump version to 0.7.13 2022-12-22 16:27:34 -06:00
Greg Heartsfield
25480e837f fix: do not block writers for more than 1 second during checkpoints 2022-12-22 16:10:49 -06:00
Greg Heartsfield
b80b54cd9d improvement: reduce logging, especially for database pool size 2022-12-22 15:47:33 -06:00
Greg Heartsfield
8ea732cbe5 feat: perform regular database maintenance (60sec), without blocking main writer thread 2022-12-22 15:16:21 -06:00
Greg Heartsfield
0f68c4e5c2 refactor: formatting 2022-12-22 15:15:45 -06:00
Greg Heartsfield
dab2cd5792 wip: future changes to rustfmt 2022-12-22 15:13:54 -06:00
Greg Heartsfield
f411aa6fc2 fix: do not re-verify NIP-05 entries where metadata was deleted 2022-12-22 13:01:48 -06:00
Greg Heartsfield
d31bbda087 improvement: reduce lifetime of database connections 2022-12-22 13:01:12 -06:00
Greg Heartsfield
5917bc53b2 improvement: run maintenance every 60 seconds instead of by event count 2022-12-22 11:40:17 -06:00
Greg Heartsfield
91177c61a1 improvement: log reason for new event creation from nip05 2022-12-22 10:48:30 -06:00
Greg Heartsfield
53c2a8051c improvement: reduce logging 2022-12-22 10:29:27 -06:00
Greg Heartsfield
168cf513ac feat: perform full checkpoints and truncate WAL every 2k events 2022-12-22 10:11:05 -06:00
Greg Heartsfield
ea204761c9 fix: do not show slow queries more than once per sub 2022-12-20 15:41:50 -06:00
Greg Heartsfield
c270ae1434 improvement: reduce event count for db writer pauses 2022-12-20 15:25:24 -06:00
Greg Heartsfield
64bd983cb6 perf: every 5000 persisted events, pause for 500ms for backups
I have observed backups running for a very long time under heavy load;
this introduces some artificial delay to give the online backup enough
time to make progress (see the sketch after the commit list).
2022-12-20 15:05:04 -06:00
Greg Heartsfield
1c153bc784 perf: shed DB query load when queue gets large 2022-12-20 13:23:21 -06:00
Greg Heartsfield
dc11d9a619 improvement: explicitly rollback transaction on duplicate event 2022-12-20 13:23:04 -06:00
Greg Heartsfield
cd1557787b improvement: log write pool 2022-12-20 13:21:57 -06:00
Greg Heartsfield
86bb7aeb9a improvement: function to check pool capacity 2022-12-20 10:07:01 -06:00
Greg Heartsfield
ce37fc1a2d build: bump version to 0.7.12 2022-12-19 14:50:42 -06:00
Greg Heartsfield
2cfd384339 perf: drop db handles that are not quickly read 2022-12-19 00:18:39 -06:00
Greg Heartsfield
8c013107f9 perf: increase upper bound for sqlite mmap 2022-12-18 23:19:43 -06:00
Greg Heartsfield
64a4466d30 perf: backing down on max_blocking_threads 2022-12-18 23:14:41 -06:00
Greg Heartsfield
1596c23eb4 perf: increase blocking threads now that contention is reduced 2022-12-18 22:46:32 -06:00
Greg Heartsfield
129badd4e1 perf: reduce per thread mmap allocation for DB 2022-12-18 22:45:32 -06:00
Greg Heartsfield
6f7c080180 improvement: reduce number of writer blocking threads from 4->2 2022-12-18 22:32:31 -06:00
Greg Heartsfield
af92561ef6 perf: remove shared cache mode (experiment) 2022-12-18 22:15:50 -06:00
Greg Heartsfield
d833a3e40d perf: reduce logging 2022-12-18 22:11:46 -06:00
Greg Heartsfield
462eb46642 build: bump version to 0.7.11 2022-12-18 20:52:01 -06:00
Greg Heartsfield
cf144d503d perf: reduce logging for slow queries 2022-12-18 20:47:11 -06:00
Greg Heartsfield
fb8375aef2 build: bump version to 0.7.10 2022-12-18 13:46:18 -06:00
Greg Heartsfield
88ac31b549 perf: increase channel size for DB communication 2022-12-18 13:44:28 -06:00
Greg Heartsfield
677b7d39e9 improvement: log slow requests that return zero results 2022-12-18 13:42:31 -06:00
Greg Heartsfield
b24d2f9aaa perf: set default blocking threads to lower value 2022-12-18 12:20:57 -06:00
Greg Heartsfield
7a3899d852 build: bump version to 0.7.9 2022-12-18 09:21:07 -06:00
Greg Heartsfield
818108b793 improvement: upgrade multiple dependencies
Updating anyhow v1.0.66 -> v1.0.67
Updating async-trait v0.1.59 -> v0.1.60
Updating cxx v1.0.83 -> v1.0.84
Updating cxx-build v1.0.83 -> v1.0.84
Updating cxxbridge-flags v1.0.83 -> v1.0.84
Updating cxxbridge-macro v1.0.83 -> v1.0.84
Updating itoa v1.0.4 -> v1.0.5
Updating link-cplusplus v1.0.7 -> v1.0.8
Updating proc-macro2 v1.0.47 -> v1.0.48
Updating quote v1.0.21 -> v1.0.22
Updating rustversion v1.0.9 -> v1.0.11
Updating ryu v1.0.11 -> v1.0.12
Updating scratch v1.0.2 -> v1.0.3
Updating serde v1.0.150 -> v1.0.151
Updating serde_derive v1.0.150 -> v1.0.151
Updating serde_json v1.0.89 -> v1.0.90
Updating syn v1.0.105 -> v1.0.106
Updating thiserror v1.0.37 -> v1.0.38
Updating thiserror-impl v1.0.37 -> v1.0.38
Updating unicode-ident v1.0.5 -> v1.0.6
2022-12-18 09:16:09 -06:00
Greg Heartsfield
d10348f7e1 feat: configurable blocking threads 2022-12-18 09:14:04 -06:00
Greg Heartsfield
8598e443d8 wip: add configuration for future feature (client concurrent db limits) 2022-12-17 23:19:48 -06:00
Greg Heartsfield
43222d44e5 feat: perform optimization after seeing many events 2022-12-17 23:18:54 -06:00
Greg Heartsfield
7c1516c4fb perf: add index for tags 2022-12-17 23:17:53 -06:00
Greg Heartsfield
0c72053a49 perf: increase mmap size to 1GB 2022-12-17 23:17:16 -06:00
Greg Heartsfield
3f32ff67ab improvement: minor logging 2022-12-17 23:11:14 -06:00
Greg Heartsfield
0b9778d6ca refactor: simplify tracking of subscriptions 2022-12-17 20:46:58 -06:00
Greg Heartsfield
9be04120c7 build: bump version to 0.7.8 2022-12-17 12:01:43 -06:00
Greg Heartsfield
cc06167e06 perf: add composite index for tag table 2022-12-17 12:01:20 -06:00
Greg Heartsfield
b6e33f044f improvement: limit db connection max lifetime 2022-12-17 10:47:35 -06:00
Greg Heartsfield
1b2c6f9fca build: bump version to 0.7.7 2022-12-17 10:09:44 -06:00
Greg Heartsfield
0d8d39ad22 feat: add rate limiting setting for subscription creation 2022-12-17 09:27:29 -06:00
Greg Heartsfield
0e851d4f71 build: bump version to 0.7.6 2022-12-17 07:51:57 -06:00
Greg Heartsfield
3c880b2f49 perf: pull distinct to outermost SQL 2022-12-17 07:49:28 -06:00
Greg Heartsfield
7a4c9266ec improvement: make hexsearch structs sortable 2022-12-17 07:49:05 -06:00
Greg Heartsfield
e8557d421b build: bump version to 0.7.5 2022-12-16 17:21:00 -06:00
Greg Heartsfield
7ca9c864f2 improvement: DB pool logging shows used connections directly 2022-12-16 17:01:49 -06:00
Greg Heartsfield
838aafd079 improvement: consistent log messages for client/sub ids 2022-12-16 15:22:27 -06:00
Greg Heartsfield
e554b10ac2 improvement: tweak sub/sql logging for slow queries 2022-12-16 14:55:45 -06:00
Greg Heartsfield
b0bfaa48fc improvement: ignore duplicate REQ messages 2022-12-16 14:37:02 -06:00
Greg Heartsfield
2e9b1b6ba7 docs: comment reason for force_no_match 2022-12-16 14:35:21 -06:00
Greg Heartsfield
4d9012d94c improvement: upgrade docker builder and base images 2022-12-16 14:33:08 -06:00
Greg Heartsfield
ffe7aac066 improvement: upgrade multiple dependencies
Updating aho-corasick v0.7.19 -> v0.7.20
Updating async-trait v0.1.58 -> v0.1.59
Updating axum v0.5.17 -> v0.6.1
Updating axum-core v0.2.9 -> v0.3.0
Updating bytes v1.2.1 -> v1.3.0
Updating cc v1.0.76 -> v1.0.78
Updating crossbeam-utils v0.8.12 -> v0.8.14
Updating cxx v1.0.82 -> v1.0.83
Updating cxx-build v1.0.82 -> v1.0.83
Updating cxxbridge-flags v1.0.82 -> v1.0.83
Updating cxxbridge-macro v1.0.82 -> v1.0.83
Updating flate2 v1.0.24 -> v1.0.25
Updating libc v0.2.137 -> v0.2.138
Updating matchit v0.5.0 -> v0.7.0
Updating miniz_oxide v0.5.4 -> v0.6.2
Updating openssl v0.10.42 -> v0.10.44
Updating openssl-sys v0.9.77 -> v0.9.79
Updating parking_lot_core v0.9.4 -> v0.9.5
Updating pest v2.4.1 -> v2.5.1
Updating pest_derive v2.4.1 -> v2.5.1
Updating pest_generator v2.4.1 -> v2.5.1
Updating pest_meta v2.4.1 -> v2.5.1
Updating prost v0.11.2 -> v0.11.3
Adding rustversion v1.0.9
Updating serde v1.0.147 -> v1.0.150
Updating serde_derive v1.0.147 -> v1.0.150
Updating serde_json v1.0.88 -> v1.0.89
Updating sha-1 v0.10.0 -> v0.10.1
Updating syn v1.0.103 -> v1.0.105
Updating tokio v1.22.0 -> v1.23.0
Updating tokio-macros v1.8.0 -> v1.8.2
Updating toml v0.5.9 -> v0.5.10
Updating tonic v0.8.2 -> v0.8.3
Updating tower-http v0.3.4 -> v0.3.5
Updating typenum v1.15.0 -> v1.16.0
2022-12-16 11:17:05 -06:00
Greg Heartsfield
f9695bd0a9 fix: db schema version updates correctly for v9 2022-12-16 10:01:49 -06:00
Greg Heartsfield
7c4bf5cc8f fix: run db migration for v9 2022-12-16 08:21:00 -06:00
Greg Heartsfield
e2de162931 feat: only show SQL in logs for slow queries unless tracing 2022-12-16 08:17:39 -06:00
Greg Heartsfield
4f606615eb perf: indexing improvement 2022-12-16 08:16:49 -06:00
Greg Heartsfield
84a58ebbcd build: bump version to 0.7.3 2022-12-16 06:32:00 -06:00
Greg Heartsfield
c48e45686d perf: schema updates for better event indexing 2022-12-15 08:48:35 -06:00
Greg Heartsfield
bbe359364a refactor: clippy warnings 2022-12-15 08:43:36 -06:00
Greg Heartsfield
9e9c494367 perf: significant query speedup when using kinds.
fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/51
2022-12-14 21:04:49 -06:00
Greg Heartsfield
5fa24bc9f1 fix: send EOSE when ids list is empty in subscriptions
Fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/50
2022-11-19 10:35:00 -06:00
Greg Heartsfield
4de7490d97 fix: send EOSE when authors list is empty in subscriptions
Fixes: https://todo.sr.ht/~gheartsfield/nostr-rs-relay/49
2022-11-19 10:00:38 -06:00
Greg Heartsfield
d0f63dc66e docs: update container instructions for rootless podman 2022-11-19 09:32:26 -06:00
14 changed files with 585 additions and 190 deletions
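Commit 64bd983cb6 above pauses the writer so that SQLite's online backup can make progress under sustained load. The mechanism is a simple counter in the single database writer loop: after every N persisted events, the writer sleeps for 500ms before continuing. Below is a minimal sketch of the idea, assuming a standalone loop and a stub persist_event; the real counter lives in db_writer in the src/db.rs hunk further down.

```rust
use std::{thread, time::Duration};

/// Pause the writer after this many persisted events (this commit used 5000;
/// the src/db.rs hunk below lowers EVENT_COUNT_BACKUP_PAUSE_TRIGGER to 1000).
const EVENT_COUNT_BACKUP_PAUSE_TRIGGER: usize = 5000;

// stand-in for the real SQLite write performed by write_event() in src/db.rs
fn persist_event(_json: &str) {}

fn main() {
    let incoming = vec![String::from("{\"kind\":1}"); 12_000];
    let mut backup_pause_counter: usize = 0;
    for event in incoming {
        persist_event(&event);
        backup_pause_counter += 1;
        if backup_pause_counter > EVENT_COUNT_BACKUP_PAUSE_TRIGGER {
            // give the online backup a window to copy pages without
            // competing with a constant stream of writes
            thread::sleep(Duration::from_millis(500));
            backup_pause_counter = 0;
        }
    }
}
```

In the actual diff the trigger constant lands at 1000 events, reflecting the later commit that reduced the event count for writer pauses.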

Cargo.lock (generated)

@@ -27,9 +27,9 @@ dependencies = [
[[package]]
name = "aho-corasick"
version = "0.7.19"
version = "0.7.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e"
checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac"
dependencies = [
"memchr",
]
@@ -54,9 +54,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.66"
version = "1.0.67"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6"
checksum = "7724808837b77f4b4de9d283820f9d98bcf496d5692934b857a2399d31ff22e6"
[[package]]
name = "async-stream"
@@ -81,9 +81,9 @@ dependencies = [
[[package]]
name = "async-trait"
version = "0.1.58"
version = "0.1.60"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c"
checksum = "677d1d8ab452a3936018a687b20e6f7cf5363d713b732b8884001317b0e48aa3"
dependencies = [
"proc-macro2",
"quote",
@@ -107,9 +107,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "axum"
version = "0.5.17"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acee9fd5073ab6b045a275b3e709c163dd36c90685219cb21804a147b58dba43"
checksum = "08b108ad2665fa3f6e6a517c3d80ec3e77d224c47d605167aefaa5d7ef97fa48"
dependencies = [
"async-trait",
"axum-core",
@@ -125,9 +125,9 @@ dependencies = [
"mime",
"percent-encoding",
"pin-project-lite",
"rustversion",
"serde",
"sync_wrapper",
"tokio",
"tower",
"tower-http",
"tower-layer",
@@ -136,9 +136,9 @@ dependencies = [
[[package]]
name = "axum-core"
version = "0.2.9"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37e5939e02c56fecd5c017c37df4238c0a839fa76b7f97acdd7efb804fd181cc"
checksum = "79b8558f5a0581152dc94dcd289132a1d377494bdeafcd41869b3258e3e2ad92"
dependencies = [
"async-trait",
"bytes",
@@ -146,6 +146,7 @@ dependencies = [
"http",
"http-body",
"mime",
"rustversion",
"tower-layer",
"tower-service",
]
@@ -194,15 +195,15 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
[[package]]
name = "bytes"
version = "1.2.1"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db"
checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c"
[[package]]
name = "cc"
version = "1.0.76"
version = "1.0.78"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76a284da2e6fe2092f2353e51713435363112dfd60030e22add80be333fb928f"
checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d"
[[package]]
name = "cfg-if"
@@ -362,9 +363,9 @@ dependencies = [
[[package]]
name = "crossbeam-utils"
version = "0.8.12"
version = "0.8.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac"
checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f"
dependencies = [
"cfg-if",
]
@@ -381,9 +382,9 @@ dependencies = [
[[package]]
name = "cxx"
version = "1.0.82"
version = "1.0.84"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4a41a86530d0fe7f5d9ea779916b7cadd2d4f9add748b99c2c029cbbdfaf453"
checksum = "27874566aca772cb515af4c6e997b5fe2119820bca447689145e39bb734d19a0"
dependencies = [
"cc",
"cxxbridge-flags",
@@ -393,9 +394,9 @@ dependencies = [
[[package]]
name = "cxx-build"
version = "1.0.82"
version = "1.0.84"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06416d667ff3e3ad2df1cd8cd8afae5da26cf9cec4d0825040f88b5ca659a2f0"
checksum = "e7bb951f2523a49533003656a72121306b225ec16a49a09dc6b0ba0d6f3ec3c0"
dependencies = [
"cc",
"codespan-reporting",
@@ -408,15 +409,15 @@ dependencies = [
[[package]]
name = "cxxbridge-flags"
version = "1.0.82"
version = "1.0.84"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "820a9a2af1669deeef27cb271f476ffd196a2c4b6731336011e0ba63e2c7cf71"
checksum = "be778b6327031c1c7b61dd2e48124eee5361e6aa76b8de93692f011b08870ab4"
[[package]]
name = "cxxbridge-macro"
version = "1.0.82"
version = "1.0.84"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a08a6e2fcc370a089ad3b4aaf54db3b1b4cee38ddabce5896b33eb693275f470"
checksum = "7b8a2b87662fe5a0a0b38507756ab66aff32638876a0866e5a5fc82ceb07ee49"
dependencies = [
"proc-macro2",
"quote",
@@ -484,9 +485,9 @@ dependencies = [
[[package]]
name = "flate2"
version = "1.0.24"
version = "1.0.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6"
checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841"
dependencies = [
"crc32fast",
"miniz_oxide",
@@ -900,9 +901,9 @@ dependencies = [
[[package]]
name = "itoa"
version = "1.0.4"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc"
checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440"
[[package]]
name = "js-sys"
@@ -932,9 +933,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.137"
version = "0.2.138"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89"
checksum = "db6d7e329c562c5dfab7a46a2afabc8b987ab9a4834c9d1ca04dc54c1546cef8"
[[package]]
name = "libsqlite3-sys"
@@ -949,9 +950,9 @@ dependencies = [
[[package]]
name = "link-cplusplus"
version = "1.0.7"
version = "1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369"
checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5"
dependencies = [
"cc",
]
@@ -1010,9 +1011,9 @@ dependencies = [
[[package]]
name = "matchit"
version = "0.5.0"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb"
checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40"
[[package]]
name = "memchr"
@@ -1034,9 +1035,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
[[package]]
name = "miniz_oxide"
version = "0.5.4"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34"
checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa"
dependencies = [
"adler",
]
@@ -1095,7 +1096,7 @@ checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21"
[[package]]
name = "nostr-rs-relay"
version = "0.7.2"
version = "0.7.13"
dependencies = [
"anyhow",
"bitcoin_hashes",
@@ -1224,9 +1225,9 @@ checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860"
[[package]]
name = "openssl"
version = "0.10.42"
version = "0.10.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12fc0523e3bd51a692c8850d075d74dc062ccf251c0110668cbd921917118a13"
checksum = "29d971fd5722fec23977260f6e81aa67d2f22cadbdc2aa049f1022d9a3be1566"
dependencies = [
"bitflags",
"cfg-if",
@@ -1256,9 +1257,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
[[package]]
name = "openssl-sys"
version = "0.9.77"
version = "0.9.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b03b84c3b2d099b81f0953422b4d4ad58761589d0229b5506356afca05a3670a"
checksum = "5454462c0eced1e97f2ec09036abc8da362e66802f66fd20f86854d9d8cbcbc4"
dependencies = [
"autocfg 1.1.0",
"cc",
@@ -1289,9 +1290,9 @@ dependencies = [
[[package]]
name = "parking_lot_core"
version = "0.9.4"
version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0"
checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba"
dependencies = [
"cfg-if",
"libc",
@@ -1325,9 +1326,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
[[package]]
name = "pest"
version = "2.4.1"
version = "2.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a528564cc62c19a7acac4d81e01f39e53e25e17b934878f4c6d25cc2836e62f8"
checksum = "cc8bed3549e0f9b0a2a78bf7c0018237a2cdf085eecbbc048e52612438e4e9d0"
dependencies = [
"thiserror",
"ucd-trie",
@@ -1335,9 +1336,9 @@ dependencies = [
[[package]]
name = "pest_derive"
version = "2.4.1"
version = "2.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5fd9bc6500181952d34bd0b2b0163a54d794227b498be0b7afa7698d0a7b18f"
checksum = "cdc078600d06ff90d4ed238f0119d84ab5d43dbaad278b0e33a8820293b32344"
dependencies = [
"pest",
"pest_generator",
@@ -1345,9 +1346,9 @@ dependencies = [
[[package]]
name = "pest_generator"
version = "2.4.1"
version = "2.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2610d5ac5156217b4ff8e46ddcef7cdf44b273da2ac5bca2ecbfa86a330e7c4"
checksum = "28a1af60b1c4148bb269006a750cff8e2ea36aff34d2d96cf7be0b14d1bed23c"
dependencies = [
"pest",
"pest_meta",
@@ -1358,9 +1359,9 @@ dependencies = [
[[package]]
name = "pest_meta"
version = "2.4.1"
version = "2.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "824749bf7e21dd66b36fbe26b3f45c713879cccd4a009a917ab8e045ca8246fe"
checksum = "fec8605d59fc2ae0c6c1aefc0c7c7a9769732017c0ce07f7a9cfffa7b4404f20"
dependencies = [
"once_cell",
"pest",
@@ -1413,18 +1414,18 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
[[package]]
name = "proc-macro2"
version = "1.0.47"
version = "1.0.48"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725"
checksum = "e9d89e5dba24725ae5678020bf8f1357a9aa7ff10736b551adbcd3f8d17d766f"
dependencies = [
"unicode-ident",
]
[[package]]
name = "prost"
version = "0.11.2"
version = "0.11.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0841812012b2d4a6145fae9a6af1534873c32aa67fff26bd09f8fa42c83f95a"
checksum = "c0b18e655c21ff5ac2084a5ad0611e827b3f92badf79f4910b5a5c58f4d87ff0"
dependencies = [
"bytes",
"prost-derive",
@@ -1471,9 +1472,9 @@ dependencies = [
[[package]]
name = "quote"
version = "1.0.21"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
checksum = "556d0f47a940e895261e77dc200d5eadfc6ef644c179c6f5edfc105e3a2292c8"
dependencies = [
"proc-macro2",
]
@@ -1734,10 +1735,16 @@ dependencies = [
]
[[package]]
name = "ryu"
name = "rustversion"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09"
checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70"
[[package]]
name = "ryu"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde"
[[package]]
name = "schannel"
@@ -1766,9 +1773,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "scratch"
version = "1.0.2"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898"
checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2"
[[package]]
name = "secp256k1"
@@ -1816,18 +1823,18 @@ dependencies = [
[[package]]
name = "serde"
version = "1.0.147"
version = "1.0.151"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965"
checksum = "97fed41fc1a24994d044e6db6935e69511a1153b52c15eb42493b26fa87feba0"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.147"
version = "1.0.151"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852"
checksum = "255abe9a125a985c05190d687b320c12f9b1f0b99445e608c21ba0782c719ad8"
dependencies = [
"proc-macro2",
"quote",
@@ -1836,9 +1843,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.88"
version = "1.0.90"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e8b3801309262e8184d9687fb697586833e939767aea0dda89f5a8e650e8bd7"
checksum = "8778cc0b528968fe72abec38b5db5a20a70d148116cd9325d2bc5f5180ca3faf"
dependencies = [
"indexmap",
"itoa",
@@ -1848,9 +1855,9 @@ dependencies = [
[[package]]
name = "sha-1"
version = "0.10.0"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f"
checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c"
dependencies = [
"cfg-if",
"cpufeatures",
@@ -1913,9 +1920,9 @@ dependencies = [
[[package]]
name = "syn"
version = "1.0.103"
version = "1.0.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d"
checksum = "09ee3a69cd2c7e06684677e5629b3878b253af05e4714964204279c6bc02cf0b"
dependencies = [
"proc-macro2",
"quote",
@@ -1953,18 +1960,18 @@ dependencies = [
[[package]]
name = "thiserror"
version = "1.0.37"
version = "1.0.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e"
checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.37"
version = "1.0.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb"
checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f"
dependencies = [
"proc-macro2",
"quote",
@@ -1997,9 +2004,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c"
[[package]]
name = "tokio"
version = "1.22.0"
version = "1.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d76ce4a75fb488c605c54bf610f221cea8b0dafb53333c1a67e8ee199dcd2ae3"
checksum = "eab6d665857cc6ca78d6e80303a02cea7a7851e85dfbd77cbdc09bd129f1ef46"
dependencies = [
"autocfg 1.1.0",
"bytes",
@@ -2013,7 +2020,7 @@ dependencies = [
"socket2",
"tokio-macros",
"tracing",
"winapi",
"windows-sys 0.42.0",
]
[[package]]
@@ -2028,9 +2035,9 @@ dependencies = [
[[package]]
name = "tokio-macros"
version = "1.8.0"
version = "1.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484"
checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8"
dependencies = [
"proc-macro2",
"quote",
@@ -2086,18 +2093,18 @@ dependencies = [
[[package]]
name = "toml"
version = "0.5.9"
version = "0.5.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7"
checksum = "1333c76748e868a4d9d1017b5ab53171dfd095f70c712fdb4653a406547f598f"
dependencies = [
"serde",
]
[[package]]
name = "tonic"
version = "0.8.2"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55b9af819e54b8f33d453655bef9b9acc171568fb49523078d0cc4e7484200ec"
checksum = "8f219fad3b929bef19b1f86fbc0358d35daed8f2cac972037ac0dc10bbb8d5fb"
dependencies = [
"async-stream",
"async-trait",
@@ -2147,9 +2154,9 @@ dependencies = [
[[package]]
name = "tower-http"
version = "0.3.4"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba"
checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858"
dependencies = [
"bitflags",
"bytes",
@@ -2305,9 +2312,9 @@ dependencies = [
[[package]]
name = "typenum"
version = "1.15.0"
version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "ucd-trie"
@@ -2323,9 +2330,9 @@ checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992"
[[package]]
name = "unicode-ident"
version = "1.0.5"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc"
[[package]]
name = "unicode-normalization"

Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "nostr-rs-relay"
version = "0.7.2"
version = "0.7.13"
edition = "2021"
authors = ["Greg Heartsfield <scsibug@imap.cc>"]
description = "A relay implementation for the Nostr protocol"

Dockerfile

@@ -1,4 +1,4 @@
FROM docker.io/library/rust:1.65.0@sha256:1bca14676a365d0ed37a1e2a1da86c2bcf883fdf6e6886469434763d94d4afd5 as builder
FROM docker.io/library/rust:1.66.0@sha256:359949280cebefe93ccb33089fe25111a3aadfe99eac4b6cbe8ec3e1b571dacb as builder
RUN USER=root cargo install cargo-auditable
RUN USER=root cargo new --bin nostr-rs-relay
@@ -17,7 +17,7 @@ COPY ./src ./src
RUN rm ./target/release/deps/nostr*relay*
RUN cargo auditable build --release --locked
FROM docker.io/library/debian:bullseye-20221024-slim@sha256:76cdda8fe5eb597ef5e712e4c9a9f5f1fb119e69f353daaa7bd6d0f6e66e541d
FROM docker.io/library/debian:bullseye-20221205-slim@sha256:25f10b4f1ded5341a3ca0a30290ff3cd5639415f0c5a2222d5e7d5dd72952aa1
ARG APP=/usr/src/app
ARG APP_DATA=/usr/src/app/db

README.md

@@ -1,8 +1,8 @@
# [nostr-rs-relay](https://git.sr.ht/~gheartsfield/nostr-rs-relay)
This is a [nostr](https://github.com/nostr-protocol/nostr) relay, written in
Rust. It currently supports the entire relay protocol, and has a
SQLite persistence layer.
This is a [nostr](https://github.com/nostr-protocol/nostr) relay,
written in Rust. It currently supports the entire relay protocol, and
persists data with SQLite.
The project master repository is available on
[sourcehut](https://sr.ht/~gheartsfield/nostr-rs-relay/), and is
@@ -37,15 +37,32 @@ application. Use a bind mount to store the SQLite database outside of
the container image, and map the container's 8080 port to a host port
(7000 in the example below).
The examples below start a rootless podman container, mapping a local
data directory and config file.
```console
$ docker build -t nostr-rs-relay .
$ podman build -t nostr-rs-relay .
$ docker run -it -p 7000:8080 \
--mount src=$(pwd)/data,target=/usr/src/app/db,type=bind nostr-rs-relay
$ mkdir data
[2021-12-31T19:58:31Z INFO nostr_rs_relay] listening on: 0.0.0.0:8080
[2021-12-31T19:58:31Z INFO nostr_rs_relay::db] opened database "/usr/src/app/db/nostr.db" for writing
[2021-12-31T19:58:31Z INFO nostr_rs_relay::db] DB version = 2
$ podman unshare chown 100:100 data
$ podman run -it --rm -p 7000:8080 \
--user=100:100 \
-v $(pwd)/data:/usr/src/app/db:Z \
-v $(pwd)/config.toml:/usr/src/app/config.toml:ro,Z \
--name nostr-relay nostr-rs-relay:latest
Nov 19 15:31:15.013 INFO nostr_rs_relay: Starting up from main
Nov 19 15:31:15.017 INFO nostr_rs_relay::server: listening on: 0.0.0.0:8080
Nov 19 15:31:15.019 INFO nostr_rs_relay::server: db writer created
Nov 19 15:31:15.019 INFO nostr_rs_relay::server: control message listener started
Nov 19 15:31:15.019 INFO nostr_rs_relay::db: Built a connection pool "event writer" (min=1, max=4)
Nov 19 15:31:15.019 INFO nostr_rs_relay::db: opened database "/usr/src/app/db/nostr.db" for writing
Nov 19 15:31:15.019 INFO nostr_rs_relay::schema: DB version = 0
Nov 19 15:31:15.054 INFO nostr_rs_relay::schema: database pragma/schema initialized to v7, and ready
Nov 19 15:31:15.054 INFO nostr_rs_relay::schema: All migration scripts completed successfully. Welcome to v7.
Nov 19 15:31:15.521 INFO nostr_rs_relay::db: Built a connection pool "client query" (min=4, max=128)
```
Use a `nostr` client such as

config.toml

@@ -62,8 +62,24 @@ reject_future_seconds = 1800
[limits]
# Limit events created per second, averaged over one minute. Must be
# an integer. If not set (or set to 0), defaults to unlimited.
#messages_per_sec = 0
# an integer. If not set (or set to 0), defaults to unlimited. Note:
# this is for the server as a whole, not per-connection.
# messages_per_sec = 0
# Limit client subscriptions created per second, averaged over one
# minute. Must be an integer. If not set (or set to 0), defaults to
# unlimited.
#subscriptions_per_min = 0
# UNIMPLEMENTED...
# Limit how many concurrent database connections a client can have.
# This prevents a single client from starting too many expensive
# database queries. Must be an integer. If not set (or set to 0),
# defaults to unlimited (subject to subscription limits).
#db_conns_per_client = 0
# Limit blocking threads used for database connections. Defaults to 16.
#max_blocking_threads = 16
# Limit the maximum size of an EVENT message. Defaults to 128 KB.
# Set to 0 for unlimited.
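Taken together, the new [limits] entries let an operator throttle subscription creation and cap the SQLite blocking-thread pool. A brief illustration with made-up, non-default values (db_conns_per_client is parsed but, as noted above, not yet enforced):

```toml
[limits]
# limit how fast clients may create subscriptions (averaged over one minute)
subscriptions_per_min = 20
# cap the blocking threads used for database connections (defaults to 16)
max_blocking_threads = 8
```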

rustfmt.toml

@@ -1 +1,4 @@
edition = "2021"
#max_width = 140
#chain_width = 100
#fn_call_width = 100

src/config.rs

@@ -52,6 +52,9 @@ pub struct Retention {
#[allow(unused)]
pub struct Limits {
pub messages_per_sec: Option<u32>, // Artificially slow down event writing to limit disk consumption (averaged over 1 minute)
pub subscriptions_per_min: Option<u32>, // Artificially slow down request (db query) creation to prevent abuse (averaged over 1 minute)
pub db_conns_per_client: Option<u32>, // How many concurrent database queries (not subscriptions) may a client have?
pub max_blocking_threads: usize,
pub max_event_bytes: Option<usize>, // Maximum size of an EVENT message
pub max_ws_message_bytes: Option<usize>,
pub max_ws_frame_bytes: Option<usize>,
@@ -214,6 +217,9 @@ impl Default for Settings {
},
limits: Limits {
messages_per_sec: None,
subscriptions_per_min: None,
db_conns_per_client: None,
max_blocking_threads: 16,
max_event_bytes: Some(2 << 17), // 128K
max_ws_message_bytes: Some(2 << 17), // 128K
max_ws_frame_bytes: Some(2 << 17), // 128K

src/conn.rs

@@ -5,7 +5,7 @@ use crate::error::Result;
use crate::subscription::Subscription;
use std::collections::HashMap;
use tracing::{debug, info};
use tracing::{debug, trace};
use uuid::Uuid;
/// A subscription identifier has a maximum length
@@ -46,6 +46,11 @@ impl ClientConn {
&self.subscriptions
}
/// Check if the given subscription already exists
pub fn has_subscription(&self, sub: &Subscription) -> bool {
self.subscriptions.values().any(|x| x == sub)
}
/// Get a short prefix of the client's unique identifier, suitable
/// for logging.
#[must_use]
@@ -69,7 +74,7 @@ impl ClientConn {
// prevent arbitrarily long subscription identifiers from
// being used.
if sub_id_len > MAX_SUBSCRIPTION_ID_LEN {
info!(
debug!(
"ignoring sub request with excessive length: ({})",
sub_id_len
);
@@ -78,8 +83,12 @@ impl ClientConn {
// check if an existing subscription exists, and replace if so
if self.subscriptions.contains_key(&k) {
self.subscriptions.remove(&k);
self.subscriptions.insert(k, s);
debug!("replaced existing subscription");
self.subscriptions.insert(k, s.clone());
trace!(
"replaced existing subscription (cid: {}, sub: {:?})",
self.get_client_prefix(),
s.get_id()
);
return Ok(());
}
@@ -89,9 +98,10 @@ impl ClientConn {
}
// add subscription
self.subscriptions.insert(k, s);
debug!(
"registered new subscription, currently have {} active subs",
self.subscriptions.len()
trace!(
"registered new subscription, currently have {} active subs (cid: {})",
self.subscriptions.len(),
self.get_client_prefix(),
);
Ok(())
}
@@ -100,8 +110,8 @@ impl ClientConn {
pub fn unsubscribe(&mut self, c: &Close) {
// TODO: return notice if subscription did not exist.
self.subscriptions.remove(&c.id);
debug!(
"removed subscription, currently have {} active subs (cid={})",
trace!(
"removed subscription, currently have {} active subs (cid: {})",
self.subscriptions.len(),
self.get_client_prefix(),
);

src/db.rs

@@ -39,6 +39,14 @@ pub struct SubmittedEvent {
/// Database file
pub const DB_FILE: &str = "nostr.db";
/// How frequently, in seconds, to run database maintenance.
pub const EVENT_MAINTENANCE_FREQ_SEC: u64 = 60;
/// How many persisted events before we pause for backups.
/// It isn't clear this is enough to make the online backup API work yet.
pub const EVENT_COUNT_BACKUP_PAUSE_TRIGGER: usize = 1000;
/// Build a database connection pool.
/// # Panics
///
@@ -76,6 +84,7 @@ pub fn build_pool(
.test_on_check_out(true) // no noticeable performance hit
.min_idle(Some(min_size))
.max_size(max_size)
.max_lifetime(Some(Duration::from_secs(30)))
.build(manager)
.unwrap();
info!(
@@ -85,6 +94,46 @@ pub fn build_pool(
pool
}
/// Perform normal maintenance
pub fn optimize_db(conn: &mut PooledConnection) -> Result<()> {
let start = Instant::now();
conn.execute_batch("PRAGMA optimize;")?;
info!("optimize ran in {:?}", start.elapsed());
Ok(())
}
#[derive(Debug)]
enum SqliteReturnStatus {
SqliteOk,
SqliteBusy,
SqliteError,
SqliteOther(u64),
}
/// Checkpoint/Truncate WAL
pub fn checkpoint_db(conn: &mut PooledConnection) -> Result<()> {
let query = "PRAGMA wal_checkpoint(TRUNCATE);";
let start = Instant::now();
let (cp_result, wal_size, _frames_checkpointed) = conn.query_row(query, [], |row| {
let checkpoint_result: u64 = row.get(0)?;
let wal_size: u64 = row.get(1)?;
let frames_checkpointed: u64 = row.get(2)?;
Ok((checkpoint_result, wal_size, frames_checkpointed))
})?;
let result = match cp_result {
0 => SqliteReturnStatus::SqliteOk,
1 => SqliteReturnStatus::SqliteBusy,
2 => SqliteReturnStatus::SqliteError,
x => SqliteReturnStatus::SqliteOther(x),
};
info!(
"checkpoint ran in {:?} (result: {:?}, WAL size: {})",
start.elapsed(),
result,
wal_size
);
Ok(())
}
/// Spawn a database writer that persists events to the SQLite store.
pub async fn db_writer(
settings: Settings,
@@ -107,7 +156,7 @@ pub async fn db_writer(
&settings,
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
1,
4,
2,
false,
);
if settings.database.in_memory {
@@ -124,6 +173,10 @@ pub async fn db_writer(
let rps_setting = settings.limits.messages_per_sec;
let mut most_recent_rate_limit = Instant::now();
let mut lim_opt = None;
// Constant writing has interfered with online backups. Keep
// track of how long since we've given the backups a chance to
// run.
let mut backup_pause_counter: usize = 0;
let clock = governor::clock::QuantaClock::default();
if let Some(rps) = rps_setting {
if rps > 0 {
@@ -188,9 +241,10 @@ pub async fn db_writer(
event.get_author_prefix()
);
} else {
info!("rejecting event, author ({:?} / {:?}) verification invalid (expired/wrong domain)",
uv.name.to_string(),
event.get_author_prefix()
info!(
"rejecting event, author ({:?} / {:?}) verification invalid (expired/wrong domain)",
uv.name.to_string(),
event.get_author_prefix()
);
notice_tx
.try_send(Notice::blocked(
@@ -232,6 +286,7 @@ pub async fn db_writer(
);
event_write = true
} else {
log_pool_stats("writer", &pool);
match write_event(&mut pool.get()?, &event) {
Ok(updated) => {
if updated == 0 {
@@ -256,6 +311,12 @@ pub async fn db_writer(
notice_tx.try_send(Notice::error(event.id, msg)).ok();
}
}
backup_pause_counter += 1;
if backup_pause_counter > EVENT_COUNT_BACKUP_PAUSE_TRIGGER {
info!("pausing db write thread for a moment...");
thread::sleep(Duration::from_millis(500));
backup_pause_counter = 0
}
}
// use rate limit, if defined, and if an event was actually written.
@@ -302,7 +363,8 @@ pub fn write_event(conn: &mut PooledConnection, e: &Event) -> Result<usize> {
)?;
if ins_count == 0 {
// if the event was a duplicate, no need to insert event or
// pubkey references. This will abort the txn.
// pubkey references.
tx.rollback().ok();
return Ok(ins_count);
}
// remember primary key of the event most recently inserted.
@@ -320,9 +382,9 @@ pub fn write_event(conn: &mut PooledConnection, e: &Event) -> Result<usize> {
// if tagvalue is lowercase hex;
if is_lower_hex(tagval) && (tagval.len() % 2 == 0) {
tx.execute(
"INSERT OR IGNORE INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3)",
params![ev_id, &tagname, hex::decode(tagval).ok()],
)?;
"INSERT OR IGNORE INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3)",
params![ev_id, &tagname, hex::decode(tagval).ok()],
)?;
} else {
tx.execute(
"INSERT OR IGNORE INTO tag (event_id, name, value) VALUES (?1, ?2, ?3)",
@@ -428,14 +490,13 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>) {
// if the filter is malformed, don't return anything.
if f.force_no_match {
let empty_query =
"SELECT DISTINCT(e.content), e.created_at FROM event e WHERE 1=0".to_owned();
let empty_query = "SELECT e.content, e.created_at FROM event e WHERE 1=0".to_owned();
// query parameters for SQLite
let empty_params: Vec<Box<dyn ToSql>> = vec![];
return (empty_query, empty_params);
}
let mut query = "SELECT DISTINCT(e.content), e.created_at FROM event e ".to_owned();
let mut query = "SELECT e.content, e.created_at FROM event e".to_owned();
// query parameters for SQLite
let mut params: Vec<Box<dyn ToSql>> = vec![];
@@ -471,8 +532,14 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>) {
}
}
}
let authors_clause = format!("({})", auth_searches.join(" OR "));
filter_components.push(authors_clause);
if !authvec.is_empty() {
let authors_clause = format!("({})", auth_searches.join(" OR "));
filter_components.push(authors_clause);
} else {
// if the authors list was empty, we should never return
// any results.
filter_components.push("false".to_owned());
}
}
// Query for Kind
if let Some(ks) = &f.kinds {
@@ -505,8 +572,14 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>) {
}
}
}
let id_clause = format!("({})", id_searches.join(" OR "));
filter_components.push(id_clause);
if !idvec.is_empty() {
let id_clause = format!("({})", id_searches.join(" OR "));
filter_components.push(id_clause);
} else {
// if the ids list was empty, we should never return
// any results.
filter_components.push("false".to_owned());
}
}
// Query for tags
if let Some(map) = &f.tags {
@@ -526,7 +599,10 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>) {
let str_clause = format!("value IN ({})", repeat_vars(str_vals.len()));
let blob_clause = format!("value_hex IN ({})", repeat_vars(blob_vals.len()));
// find evidence of the target tag name/value existing for this event.
let tag_clause = format!("e.id IN (SELECT e.id FROM event e LEFT JOIN tag t on e.id=t.event_id WHERE hidden!=TRUE and (name=? AND ({} OR {})))", str_clause, blob_clause);
let tag_clause = format!(
"e.id IN (SELECT e.id FROM event e LEFT JOIN tag t on e.id=t.event_id WHERE hidden!=TRUE and (name=? AND ({} OR {})))",
str_clause, blob_clause
);
// add the tag name as the first parameter
params.push(Box::new(key.to_string()));
// add all tag values that are plain strings as params
@@ -579,13 +655,55 @@ fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
// encapsulate subqueries into select statements
let subqueries_selects: Vec<String> = subqueries
.iter()
.map(|s| format!("SELECT content, created_at FROM ({})", s))
.map(|s| format!("SELECT distinct content, created_at FROM ({})", s))
.collect();
let query: String = subqueries_selects.join(" UNION ");
trace!("final query string: {}", query);
(query, params)
}
/// Check if the pool is fully utilized
fn _pool_at_capacity(pool: &SqlitePool) -> bool {
let state: r2d2::State = pool.state();
state.idle_connections == 0
}
/// Log pool stats
fn log_pool_stats(name: &str, pool: &SqlitePool) {
let state: r2d2::State = pool.state();
let in_use_cxns = state.connections - state.idle_connections;
trace!(
"DB pool {:?} usage (in_use: {}, available: {})",
name,
in_use_cxns,
state.connections
);
if state.connections == in_use_cxns {
debug!("DB pool {:?} is empty (in_use: {})", name, in_use_cxns);
}
}
/// Perform database maintenance on a regular basis
pub async fn db_maintenance(pool: SqlitePool) {
tokio::task::spawn(async move {
loop {
tokio::select! {
_ = tokio::time::sleep(Duration::from_secs(EVENT_MAINTENANCE_FREQ_SEC)) => {
if let Ok(mut conn) = pool.get() {
// the busy timer will block writers, so don't set
// this any higher than you want max latency for event
// writes.
conn.busy_timeout(Duration::from_secs(1)).ok();
debug!("running database optimizer");
optimize_db(&mut conn).ok();
debug!("running wal_checkpoint(TRUNCATE)");
checkpoint_db(&mut conn).ok();
}
}
};
}
});
}
/// Perform a database query using a subscription.
///
/// The [`Subscription`] is converted into a SQL query. Each result
@@ -599,45 +717,116 @@ pub async fn db_query(
query_tx: tokio::sync::mpsc::Sender<QueryResult>,
mut abandon_query_rx: tokio::sync::oneshot::Receiver<()>,
) {
let pre_spawn_start = Instant::now();
task::spawn_blocking(move || {
trace!("going to query for: {:?}", sub);
let mut row_count: usize = 0;
let db_queue_time = pre_spawn_start.elapsed();
// if the queue time was very long (>5 seconds), spare the DB and abort.
if db_queue_time > Duration::from_secs(5) {
info!(
"shedding DB query load from {:?} (cid: {}, sub: {:?})",
db_queue_time, client_id, sub.id
);
return Ok(());
}
// otherwise, report queuing time if it is slow
else if db_queue_time > Duration::from_secs(1) {
debug!(
"(slow) DB query queued for {:?} (cid: {}, sub: {:?})",
db_queue_time, client_id, sub.id
);
}
let start = Instant::now();
let mut row_count: usize = 0;
// generate SQL query
let (q, p) = query_from_sub(&sub);
trace!("SQL generated in {:?}", start.elapsed());
let sql_gen_elapsed = start.elapsed();
if sql_gen_elapsed > Duration::from_millis(10) {
debug!("SQL (slow) generated in {:?}", start.elapsed());
}
// show pool stats
debug!("DB pool stats: {:?}", pool.state());
log_pool_stats("reader", &pool);
// cutoff for displaying slow queries
let slow_cutoff = Duration::from_millis(2000);
// any client that doesn't cause us to generate new rows in 5
// seconds gets dropped.
let abort_cutoff = Duration::from_secs(5);
let start = Instant::now();
let mut slow_first_event;
let mut last_successful_send = Instant::now();
if let Ok(conn) = pool.get() {
// execute the query. Don't cache, since queries vary so much.
let mut stmt = conn.prepare(&q)?;
let mut event_rows = stmt.query(rusqlite::params_from_iter(p))?;
let mut first_result = true;
while let Some(row) = event_rows.next()? {
let first_event_elapsed = start.elapsed();
slow_first_event = first_event_elapsed >= slow_cutoff;
if first_result {
debug!(
"time to first result: {:?} (cid={}, sub={:?})",
start.elapsed(),
client_id,
sub.id
"first result in {:?} (cid: {}, sub: {:?})",
first_event_elapsed, client_id, sub.id
);
first_result = false;
}
// check if this is still active
// TODO: check every N rows
if abandon_query_rx.try_recv().is_ok() {
debug!("query aborted (sub={:?})", sub.id);
// logging for slow queries; show sub and SQL.
// to reduce logging; only show 1/16th of clients (leading 0)
if row_count == 0 && slow_first_event && client_id.starts_with("0") {
debug!(
"query req (slow): {:?} (cid: {}, sub: {:?})",
sub, client_id, sub.id
);
debug!(
"query string (slow): {} (cid: {}, sub: {:?})",
q, client_id, sub.id
);
} else {
trace!(
"query req: {:?} (cid: {}, sub: {:?})",
sub,
client_id,
sub.id
);
trace!(
"query string: {} (cid: {}, sub: {:?})",
q,
client_id,
sub.id
);
}
// check if this is still active; every 100 rows
if row_count % 100 == 0 && abandon_query_rx.try_recv().is_ok() {
debug!("query aborted (cid: {}, sub: {:?})", client_id, sub.id);
return Ok(());
}
row_count += 1;
let event_json = row.get(0)?;
loop {
if query_tx.capacity() != 0 {
// we have capacity to add another item
break;
} else {
// the queue is full
trace!("db reader thread is stalled");
if last_successful_send + abort_cutoff < Instant::now() {
// the queue has been full for too long, abort
info!("aborting database query due to slow client");
let ok: Result<()> = Ok(());
return ok;
}
// give the queue a chance to clear before trying again
thread::sleep(Duration::from_millis(100));
}
}
// TODO: we could use try_send, but we'd have to juggle
// getting the query result back as part of the error
// result.
query_tx
.blocking_send(QueryResult {
sub_id: sub.get_id(),
event: event_json,
})
.ok();
last_successful_send = Instant::now();
}
query_tx
.blocking_send(QueryResult {
@@ -646,11 +835,12 @@ pub async fn db_query(
})
.ok();
debug!(
"query completed ({} rows) in {:?} (cid={}, sub={:?})",
row_count,
start.elapsed(),
"query completed in {:?} (cid: {}, sub: {:?}, db_time: {:?}, rows: {})",
pre_spawn_start.elapsed(),
client_id,
sub.id
sub.id,
start.elapsed(),
row_count
);
} else {
warn!("Could not get a database connection for querying");

src/hexrange.rs

@@ -3,7 +3,7 @@ use crate::utils::is_hex;
use hex;
/// Types of hexadecimal queries.
#[derive(PartialEq, Eq, Debug, Clone)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)]
pub enum HexSearch {
// when no range is needed, exact 32-byte
Exact(Vec<u8>),

src/nip05.rs

@@ -517,7 +517,7 @@ impl Verifier {
Ok(updated) => {
if updated != 0 {
info!(
"persisted event: {:?} in {:?}",
"persisted event (new verified pubkey): {:?} in {:?}",
event.get_event_id_prefix(),
start.elapsed()
);
@@ -721,7 +721,7 @@ pub fn query_oldest_user_verification(
earliest: u64,
) -> Result<VerificationRecord> {
let tx = conn.transaction()?;
let query = "SELECT v.id, v.name, e.event_hash, e.author, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v LEFT JOIN event e ON e.id=v.metadata_event WHERE (v.verified_at < ? OR v.verified_at IS NULL) AND (v.failed_at < ? OR v.failed_at IS NULL) ORDER BY v.verified_at ASC, v.failed_at ASC LIMIT 1;";
let query = "SELECT v.id, v.name, e.event_hash, e.author, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v INNER JOIN event e ON e.id=v.metadata_event WHERE (v.verified_at < ? OR v.verified_at IS NULL) AND (v.failed_at < ? OR v.failed_at IS NULL) ORDER BY v.verified_at ASC, v.failed_at ASC LIMIT 1;";
let mut stmt = tx.prepare_cached(query)?;
let fields = stmt.query_row(params![earliest, earliest], |r| {
let rowid: u64 = r.get(0)?;

src/schema.rs

@@ -16,11 +16,11 @@ pub const STARTUP_SQL: &str = r##"
PRAGMA main.synchronous=NORMAL;
PRAGMA foreign_keys = ON;
PRAGMA journal_size_limit=32768;
pragma mmap_size = 536870912; -- 512MB of mmap
pragma mmap_size = 17179869184; -- cap mmap at 16GB
"##;
/// Latest database version
pub const DB_VERSION: usize = 7;
pub const DB_VERSION: usize = 11;
/// Schema definition
const INIT_SQL: &str = formatcp!(
@@ -48,10 +48,10 @@ content TEXT NOT NULL -- serialized json of event object
-- Event Indexes
CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash);
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS author_index ON event(author);
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
-- Tag Table
-- Tag values are stored as either a BLOB (if they come in as a
@@ -67,6 +67,8 @@ FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
CREATE INDEX IF NOT EXISTS tag_val_hex_index ON tag(value_hex);
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value_hex,value);
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value_hex);
-- NIP-05 User Validation
CREATE TABLE IF NOT EXISTS user_verification (
@@ -157,6 +159,19 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
if curr_version == 6 {
curr_version = mig_6_to_7(conn)?;
}
if curr_version == 7 {
curr_version = mig_7_to_8(conn)?;
}
if curr_version == 8 {
curr_version = mig_8_to_9(conn)?;
}
if curr_version == 9 {
curr_version = mig_9_to_10(conn)?;
}
if curr_version == 10 {
curr_version = mig_10_to_11(conn)?;
}
if curr_version == DB_VERSION {
info!(
"All migration scripts completed successfully. Welcome to v{}.",
@@ -373,3 +388,84 @@ PRAGMA user_version = 7;
}
Ok(7)
}
fn mig_7_to_8(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 7->8");
// Remove redundant indexes, and add a better multi-column index.
let upgrade_sql = r##"
DROP INDEX IF EXISTS created_at_index;
DROP INDEX IF EXISTS kind_index;
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
PRAGMA user_version = 8;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v7 -> v8");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(8)
}
fn mig_8_to_9(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 8->9");
// Those old indexes were actually helpful...
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
PRAGMA user_version = 9;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v8 -> v9");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(9)
}
fn mig_9_to_10(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 9->10");
// Those old indexes were actually helpful...
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value_hex,value);
PRAGMA user_version = 10;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v9 -> v10");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(10)
}
fn mig_10_to_11(conn: &mut PooledConnection) -> Result<usize> {
info!("database schema needs update from 10->11");
// Those old indexes were actually helpful...
let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value_hex);
reindex;
pragma optimize;
PRAGMA user_version = 11;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v10 -> v11");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
Ok(11)
}

src/server.rs

@@ -14,6 +14,7 @@ use crate::notice::Notice;
use crate::subscription::Subscription;
use futures::SinkExt;
use futures::StreamExt;
use governor::{Jitter, Quota, RateLimiter};
use http::header::HeaderMap;
use hyper::header::ACCEPT;
use hyper::service::{make_service_fn, service_fn};
@@ -21,6 +22,7 @@ use hyper::upgrade::Upgraded;
use hyper::{
header, server::conn::AddrStream, upgrade, Body, Request, Response, Server, StatusCode,
};
use rusqlite::OpenFlags;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::collections::HashMap;
@@ -244,6 +246,14 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
let rt = Builder::new_multi_thread()
.enable_all()
.thread_name("tokio-ws")
// limit concurrent SQLite blocking threads
.max_blocking_threads(settings.limits.max_blocking_threads)
.on_thread_start(|| {
trace!("started new thread");
})
.on_thread_stop(|| {
trace!("stopping thread");
})
.build()
.unwrap();
// start tokio
@@ -301,6 +311,17 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
}
}
}
// build a connection pool for DB maintenance
let maintenance_pool = db::build_pool(
"maintenance writer",
&settings,
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
1,
1,
false,
);
db::db_maintenance(maintenance_pool).await;
// listen for (external to tokio) shutdown request
let controlled_shutdown = invoke_shutdown.clone();
tokio::spawn(async move {
@@ -311,6 +332,7 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
controlled_shutdown.send(()).ok();
}
Err(std::sync::mpsc::RecvError) => {
// FIXME: spurious error on startup?
debug!("shutdown requestor is disconnected");
}
};
@@ -329,8 +351,7 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
let pool = db::build_pool(
"client query",
&settings,
rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY
| rusqlite::OpenFlags::SQLITE_OPEN_SHARED_CACHE,
rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY,
db_min_conn,
db_max_conn,
true,
@@ -431,17 +452,34 @@ async fn nostr_server(
event_tx: mpsc::Sender<SubmittedEvent>,
mut shutdown: Receiver<()>,
) {
// the time this websocket nostr server started
let orig_start = Instant::now();
// get a broadcast channel for clients to communicate on
let mut bcast_rx = broadcast.subscribe();
// Track internal client state
let mut conn = conn::ClientConn::new(client_info.remote_ip);
// subscription creation rate limiting
let mut sub_lim_opt = None;
// 100ms jitter when the rate limiter returns
let jitter = Jitter::up_to(Duration::from_millis(100));
let sub_per_min_setting = settings.limits.subscriptions_per_min;
if let Some(sub_per_min) = sub_per_min_setting {
if sub_per_min > 0 {
trace!("Rate limits for sub creation ({}/min)", sub_per_min);
let quota_time = core::num::NonZeroU32::new(sub_per_min).unwrap();
let quota = Quota::per_minute(quota_time);
sub_lim_opt = Some(RateLimiter::direct(quota));
}
}
// Use the remote IP as the client identifier
let cid = conn.get_client_prefix();
// Create a channel for receiving query results from the database.
// we will send out the tx handle to any query we generate.
let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(256);
// this has capacity for some of the larger requests we see, which
// should allow the DB thread to release the handle earlier.
let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(20000);
// Create channel for receiving NOTICEs
let (notice_tx, mut notice_rx) = mpsc::channel::<Notice>(32);
let (notice_tx, mut notice_rx) = mpsc::channel::<Notice>(128);
// last time this client sent data (message, ping, etc.)
let mut last_message_time = Instant::now();
@@ -459,19 +497,18 @@ async fn nostr_server(
// when these subscriptions are cancelled, make a message
// available to the executing query so it knows to stop.
let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
// for stats, keep track of how many events the client published,
// and how many it received from queries.
let mut client_published_event_count: usize = 0;
let mut client_received_event_count: usize = 0;
debug!("new connection for client: {}, ip: {:?}", cid, conn.ip());
debug!("new client connection (cid: {}, ip: {:?})", cid, conn.ip());
if let Some(ua) = client_info.user_agent {
debug!("client: {} has user-agent: {:?}", cid, ua);
debug!("cid: {}, user-agent: {:?}", cid, ua);
}
loop {
tokio::select! {
_ = shutdown.recv() => {
info!("Close connection down due to shutdown, client: {}, ip: {:?}", cid, conn.ip());
info!("Close connection down due to shutdown, client: {}, ip: {:?}, connected: {:?}", cid, conn.ip(), orig_start.elapsed());
// server shutting down, exit loop
break;
},
@@ -513,7 +550,7 @@ async fn nostr_server(
// TODO: serialize at broadcast time, instead of
// once for each consumer.
if let Ok(event_str) = serde_json::to_string(&global_event) {
debug!("sub match for client: {}, sub: {:?}, event: {:?}",
trace!("sub match for client: {}, sub: {:?}, event: {:?}",
cid, s,
global_event.get_event_id_prefix());
// create an event response and send it
@@ -552,17 +589,17 @@ async fn nostr_server(
Err(WsError::AlreadyClosed | WsError::ConnectionClosed |
WsError::Protocol(tungstenite::error::ProtocolError::ResetWithoutClosingHandshake)))
=> {
debug!("websocket close from client: {}, ip: {:?}",cid, conn.ip());
debug!("websocket close from client (cid: {}, ip: {:?})",cid, conn.ip());
break;
},
Some(Err(WsError::Io(e))) => {
// IO errors are considered fatal
warn!("IO error (client: {}, ip: {:?}): {:?}", cid, conn.ip(), e);
warn!("IO error (cid: {}, ip: {:?}): {:?}", cid, conn.ip(), e);
break;
}
x => {
// default condition on error is to close the client connection
info!("unknown error (client: {}, ip: {:?}): {:?} (closing conn)", cid, conn.ip(), x);
info!("unknown error (cid: {}, ip: {:?}): {:?} (closing conn)", cid, conn.ip(), x);
break;
}
};
@@ -577,7 +614,7 @@ async fn nostr_server(
match parsed {
Ok(e) => {
let id_prefix:String = e.id.chars().take(8).collect();
debug!("successfully parsed/validated event: {:?} from client: {}", id_prefix, cid);
debug!("successfully parsed/validated event: {:?} (cid: {})", id_prefix, cid);
// check if the event is too far in the future.
if e.is_valid_timestamp(settings.options.reject_future_seconds) {
// Write this to the database.
@@ -594,32 +631,41 @@ async fn nostr_server(
}
},
Err(e) => {
info!("client: {} sent an invalid event", cid);
info!("client sent an invalid event (cid: {})", cid);
ws_stream.send(make_notice_message(Notice::invalid(evid, &format!("{}", e)))).await.ok();
}
}
},
Ok(NostrMessage::SubMsg(s)) => {
debug!("client: {} requesting a subscription", cid);
debug!("subscription requested (cid: {}, sub: {:?})", cid, s.id);
// subscription handling consists of:
// * check for rate limits
// * registering the subscription so future events can be matched
// * making a channel to cancel to request later
// * sending a request for a SQL query
let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
match conn.subscribe(s.clone()) {
Ok(()) => {
// when we insert, if there was a previous query running with the same name, cancel it.
if let Some(previous_query) = running_queries.insert(s.id.to_owned(), abandon_query_tx) {
previous_query.send(()).ok();
}
// start a database query
db::db_query(s, cid.to_owned(), pool.clone(), query_tx.clone(), abandon_query_rx).await;
},
Err(e) => {
info!("Subscription error: {}", e);
ws_stream.send(make_notice_message(Notice::message(format!("Subscription error: {}", e)))).await.ok();
// Do nothing if the sub already exists.
if !conn.has_subscription(&s) {
if let Some(ref lim) = sub_lim_opt {
lim.until_ready_with_jitter(jitter).await;
}
let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
match conn.subscribe(s.clone()) {
Ok(()) => {
// when we insert, if there was a previous query running with the same name, cancel it.
if let Some(previous_query) = running_queries.insert(s.id.to_owned(), abandon_query_tx) {
previous_query.send(()).ok();
}
// start a database query. this spawns a blocking database query on a worker thread.
db::db_query(s, cid.to_owned(), pool.clone(), query_tx.clone(), abandon_query_rx).await;
},
Err(e) => {
info!("Subscription error: {} (cid: {}, sub: {:?})", e, cid, s.id);
ws_stream.send(make_notice_message(Notice::message(format!("Subscription error: {}", e)))).await.ok();
}
}
}
} else {
info!("client sent duplicate subscription, ignoring (cid: {}, sub: {:?})", cid, s.id);
}
},
Ok(NostrMessage::CloseMsg(cc)) => {
// closing a request simply removes the subscription.
@@ -640,19 +686,19 @@ async fn nostr_server(
}
},
Err(Error::ConnError) => {
debug!("got connection close/error, disconnecting client: {}, ip: {:?}",cid, conn.ip());
debug!("got connection close/error, disconnecting cid: {}, ip: {:?}",cid, conn.ip());
break;
}
Err(Error::EventMaxLengthError(s)) => {
info!("client: {} sent event larger ({} bytes) than max size", cid, s);
info!("client sent event larger ({} bytes) than max size (cid: {})", s, cid);
ws_stream.send(make_notice_message(Notice::message("event exceeded max size".into()))).await.ok();
},
Err(Error::ProtoParseError) => {
info!("client {} sent event that could not be parsed", cid);
info!("client sent event that could not be parsed (cid: {})", cid);
ws_stream.send(make_notice_message(Notice::message("could not parse command".into()))).await.ok();
},
Err(e) => {
info!("got non-fatal error from client: {}, error: {:?}", cid, e);
info!("got non-fatal error from client (cid: {}, error: {:?}", cid, e);
},
}
},
@@ -663,10 +709,11 @@ async fn nostr_server(
stop_tx.send(()).ok();
}
info!(
"stopping connection for client: {}, ip: {:?} (client sent {} event(s), received {})",
"stopping client connection (cid: {}, ip: {:?}, sent: {} events, recv: {} events, connected: {:?})",
cid,
conn.ip(),
client_published_event_count,
client_received_event_count
client_received_event_count,
orig_start.elapsed()
);
}

src/subscription.rs

@@ -37,6 +37,9 @@ pub struct ReqFilter {
#[serde(skip)]
pub tags: Option<HashMap<char, HashSet<String>>>,
/// Force no matches due to malformed data
// we can't represent it in the req filter, so we don't want to
// erroneously match. This basically indicates the req tried to
// do something invalid.
pub force_no_match: bool,
}