mirror of
https://github.com/scsibug/nostr-rs-relay.git
synced 2025-09-01 03:40:46 -04:00
Compare commits
288 Commits
.build.yml (new file, 19 lines)
@@ -0,0 +1,19 @@
image: fedora/latest
arch: x86_64
artifacts:
  - nostr-rs-relay/target/release/nostr-rs-relay
environment:
  RUST_LOG: debug
packages:
  - cargo
  - sqlite-devel
sources:
  - https://git.sr.ht/~gheartsfield/nostr-rs-relay/
shell: false
tasks:
  - build: |
      cd nostr-rs-relay
      cargo build --release
  - test: |
      cd nostr-rs-relay
      cargo test --release
.cargo/config.toml (new file, 2 lines)
@@ -0,0 +1,2 @@
[build]
rustflags = ["--cfg", "tokio_unstable"]
.pre-commit-config.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.3.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-added-large-files
  - repo: https://github.com/doublify/pre-commit-rust
    rev: v1.0
    hooks:
      # - id: fmt
      - id: cargo-check
      - id: clippy
Cargo.lock (generated, 2003 lines): file diff suppressed because it is too large.
Cargo.toml (diff: 56 lines)
@@ -1,22 +1,46 @@
[package]
name = "nostr-rs-relay"
version = "0.1.4"
version = "0.7.14"
edition = "2021"
authors = ["Greg Heartsfield <scsibug@imap.cc>"]
description = "A relay implementation for the Nostr protocol"
readme = "README.md"
homepage = "https://sr.ht/~gheartsfield/nostr-rs-relay/"
repository = "https://git.sr.ht/~gheartsfield/nostr-rs-relay"
license = "MIT"
keywords = ["nostr", "server"]
categories = ["network-programming", "web-programming"]

[dependencies]
log = "^0.4"
env_logger = "^0.9"
tokio = { version = "^1.14", features = ["full"] }
futures = "^0.3"
futures-util = "^0.3"
tokio-tungstenite = "^0.16"
tungstenite = "^0.16"
thiserror = "^1"
uuid = { version = "^0.8", features = ["v4"] }
tracing = "0.1.36"
tracing-subscriber = "0.2.0"
tokio = { version = "1", features = ["full", "tracing", "signal"] }
console-subscriber = "0.1.8"
futures = "0.3"
futures-util = "0.3"
tokio-tungstenite = "0.17"
tungstenite = "0.17"
thiserror = "1"
uuid = { version = "1.1.2", features = ["v4"] }
config = { version = "0.12", features = ["toml"] }
bitcoin_hashes = { version = "0.10", features = ["serde"] }
secp256k1 = {version = "0.21", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = {version = "1.0", features = ["preserve_order"]}
hex = "0.4"
rusqlite = { version = "0.26", features = ["limits","bundled","modern_sqlite", "trace"]}
r2d2 = "0.8"
r2d2_sqlite = "0.19"
lazy_static = "1.4"
governor = "0.4"
nonzero_ext = "0.3"
hyper = { version="0.14", features=["client", "server","http1","http2","tcp"] }
hyper-tls = "0.5"
http = { version = "0.2" }
parse_duration = "2"
rand = "0.8"
const_format = "0.2.28"
regex = "1"

bitcoin_hashes = { version = "^0.9", features = ["serde"] }
secp256k1 = { version = "^0.20", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] }
serde = { version = "^1.0", features = ["derive"] }
serde_json = "^1.0"
hex = "^0.4"
rusqlite = "^0.26"
[dev-dependencies]
anyhow = "1"
Dockerfile (diff: 25 lines)
@@ -1,23 +1,28 @@
FROM rust:1.57 as builder
FROM docker.io/library/rust:1.66.0 as builder

RUN USER=root cargo install cargo-auditable
RUN USER=root cargo new --bin nostr-rs-relay
WORKDIR ./nostr-rs-relay
COPY ./Cargo.toml ./Cargo.toml
COPY ./Cargo.lock ./Cargo.lock
RUN cargo build --release
# build dependencies only (caching)
RUN cargo auditable build --release --locked
# get rid of starter project code
RUN rm src/*.rs

# copy project source code
COPY ./src ./src

# build auditable release using locked deps
RUN rm ./target/release/deps/nostr*relay*
RUN cargo build --release
RUN cargo auditable build --release --locked

FROM docker.io/library/debian:bullseye-slim

FROM debian:buster-slim
ARG APP=/usr/src/app
ARG APP_DATA=/usr/src/app/db

RUN apt-get update \
    && apt-get install -y ca-certificates tzdata sqlite3 \
    && apt-get install -y ca-certificates tzdata sqlite3 libc6 \
    && rm -rf /var/lib/apt/lists/*

EXPOSE 8080
@@ -35,9 +40,9 @@ COPY --from=builder /nostr-rs-relay/target/release/nostr-rs-relay ${APP}/nostr-r
RUN chown -R $APP_USER:$APP_USER ${APP}

USER $APP_USER
WORKDIR ${APP_DATA}
WORKDIR ${APP}

ENV RUST_LOG=info
ENV RUST_LOG=info,nostr_rs_relay=info
ENV APP_DATA=${APP_DATA}

CMD ["../nostr-rs-relay"]
CMD ./nostr-rs-relay --db ${APP_DATA}
README.md (diff: 134 lines)
@@ -1,26 +1,74 @@
# [nostr-rs-relay](https://git.sr.ht/~gheartsfield/nostr-rs-relay)

This is a [nostr](https://github.com/fiatjaf/nostr) relay, written in
Rust. It currently supports the entire relay protocol, and has a
SQLite persistence layer.
This is a [nostr](https://github.com/nostr-protocol/nostr) relay,
written in Rust. It currently supports the entire relay protocol, and
persists data with SQLite.

The project master repository is available on
[sourcehut](https://sr.ht/~gheartsfield/nostr-rs-relay/), and is
mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay).

[](https://builds.sr.ht/~gheartsfield/nostr-rs-relay/commits/master?)

## Features

[NIPs](https://github.com/nostr-protocol/nips) with a relay-specific implementation are listed here.

- [x] NIP-01: [Basic protocol flow description](https://github.com/nostr-protocol/nips/blob/master/01.md)
  * Core event model
  * Hide old metadata events
  * Id/Author prefix search
- [x] NIP-02: [Contact List and Petnames](https://github.com/nostr-protocol/nips/blob/master/02.md)
- [ ] NIP-03: [OpenTimestamps Attestations for Events](https://github.com/nostr-protocol/nips/blob/master/03.md)
- [x] NIP-05: [Mapping Nostr keys to DNS-based internet identifiers](https://github.com/nostr-protocol/nips/blob/master/05.md)
- [x] NIP-09: [Event Deletion](https://github.com/nostr-protocol/nips/blob/master/09.md)
- [x] NIP-11: [Relay Information Document](https://github.com/nostr-protocol/nips/blob/master/11.md)
- [x] NIP-12: [Generic Tag Queries](https://github.com/nostr-protocol/nips/blob/master/12.md)
- [x] NIP-15: [End of Stored Events Notice](https://github.com/nostr-protocol/nips/blob/master/15.md)
- [x] NIP-16: [Event Treatment](https://github.com/nostr-protocol/nips/blob/master/16.md)
- [x] NIP-20: [Command Results](https://github.com/nostr-protocol/nips/blob/master/20.md)
- [x] NIP-22: [Event `created_at` limits](https://github.com/nostr-protocol/nips/blob/master/22.md) (_future-dated events only_)
- [x] NIP-26: [Event Delegation](https://github.com/nostr-protocol/nips/blob/master/26.md)
- [x] NIP-28: [Public Chat](https://github.com/nostr-protocol/nips/blob/master/28.md)

## Quick Start

The provided `Dockerfile` will compile and build the server application. Use a bind mount to store the SQLite database outside of the container image, and map the container's 8080 port to a host port (8090 in the example below).
The provided `Dockerfile` will compile and build the server
application. Use a bind mount to store the SQLite database outside of
the container image, and map the container's 8080 port to a host port
(7000 in the example below).

The examples below start a rootless podman container, mapping a local
data directory and config file.

```console
$ docker build -t nostr-rs-relay .
$ docker run -p 8090:8080 --mount src=$(pwd)/nostr_data,target=/usr/src/app/db,type=bind nostr-rs-relay
[2021-12-12T04:20:47Z INFO nostr_rs_relay] Listening on: 0.0.0.0:8080
[2021-12-12T04:20:47Z INFO nostr_rs_relay::db] Opened database for writing
[2021-12-12T04:20:47Z INFO nostr_rs_relay::db] init completed
$ podman build -t nostr-rs-relay .

$ mkdir data

$ podman unshare chown 100:100 data

$ podman run -it --rm -p 7000:8080 \
  --user=100:100 \
  -v $(pwd)/data:/usr/src/app/db:Z \
  -v $(pwd)/config.toml:/usr/src/app/config.toml:ro,Z \
  --name nostr-relay nostr-rs-relay:latest

Nov 19 15:31:15.013 INFO nostr_rs_relay: Starting up from main
Nov 19 15:31:15.017 INFO nostr_rs_relay::server: listening on: 0.0.0.0:8080
Nov 19 15:31:15.019 INFO nostr_rs_relay::server: db writer created
Nov 19 15:31:15.019 INFO nostr_rs_relay::server: control message listener started
Nov 19 15:31:15.019 INFO nostr_rs_relay::db: Built a connection pool "event writer" (min=1, max=4)
Nov 19 15:31:15.019 INFO nostr_rs_relay::db: opened database "/usr/src/app/db/nostr.db" for writing
Nov 19 15:31:15.019 INFO nostr_rs_relay::schema: DB version = 0
Nov 19 15:31:15.054 INFO nostr_rs_relay::schema: database pragma/schema initialized to v7, and ready
Nov 19 15:31:15.054 INFO nostr_rs_relay::schema: All migration scripts completed successfully. Welcome to v7.
Nov 19 15:31:15.521 INFO nostr_rs_relay::db: Built a connection pool "client query" (min=4, max=128)
```

Use a `nostr` client such as [`noscl`](https://github.com/fiatjaf/noscl) to publish and query events.
Use a `nostr` client such as
[`noscl`](https://github.com/fiatjaf/noscl) to publish and query
events.

```console
$ noscl publish "hello world"
@@ -31,6 +79,72 @@ Text Note [81cf...2652] from 296a...9b92 5 seconds ago
hello world
```

A pre-built container is also available on DockerHub:
https://hub.docker.com/r/scsibug/nostr-rs-relay

## Build and Run (without Docker)

Building `nostr-rs-relay` requires an installation of Cargo & Rust: https://www.rust-lang.org/tools/install

Clone this repository, and then build a release version of the relay:

```console
$ git clone -q https://git.sr.ht/\~gheartsfield/nostr-rs-relay
$ cd nostr-rs-relay
$ cargo build -q -r
```

The relay executable is now located in
`target/release/nostr-rs-relay`. In order to run it with logging
enabled, execute it with the `RUST_LOG` variable set:

```console
$ RUST_LOG=warn,nostr_rs_relay=info ./target/release/nostr-rs-relay
Dec 26 10:31:56.455 INFO nostr_rs_relay: Starting up from main
Dec 26 10:31:56.464 INFO nostr_rs_relay::server: listening on: 0.0.0.0:8080
Dec 26 10:31:56.466 INFO nostr_rs_relay::server: db writer created
Dec 26 10:31:56.466 INFO nostr_rs_relay::db: Built a connection pool "event writer" (min=1, max=2)
Dec 26 10:31:56.466 INFO nostr_rs_relay::db: opened database "./nostr.db" for writing
Dec 26 10:31:56.466 INFO nostr_rs_relay::schema: DB version = 11
Dec 26 10:31:56.467 INFO nostr_rs_relay::db: Built a connection pool "maintenance writer" (min=1, max=2)
Dec 26 10:31:56.467 INFO nostr_rs_relay::server: control message listener started
Dec 26 10:31:56.468 INFO nostr_rs_relay::db: Built a connection pool "client query" (min=4, max=8)
```

You now have a running relay, on port `8080`. Use a `nostr` client or
`websocat` to connect and send/query for events.
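If you would rather exercise the relay from code than install a client, the following is a minimal, illustrative sketch (not part of this repository) that connects and sends a NIP-01 `REQ`, using the `tokio-tungstenite` and `futures-util` crates already present in `Cargo.toml`. The subscription id and empty filter are arbitrary example values.

```rust
// Illustrative sketch only: connect to a locally running relay, send a
// NIP-01 REQ with an empty filter, and print messages until the NIP-15
// end-of-stored-events ("EOSE") notice arrives.
use futures_util::{SinkExt, StreamExt};
use tokio_tungstenite::{connect_async, tungstenite::Message};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let (mut ws, _response) = connect_async("ws://127.0.0.1:8080").await?;
    // "sub1" and the empty filter {} are arbitrary example values
    ws.send(Message::Text(r#"["REQ","sub1",{}]"#.to_string())).await?;
    while let Some(msg) = ws.next().await {
        if let Message::Text(txt) = msg? {
            println!("{}", txt);
            if txt.starts_with(r#"["EOSE""#) {
                break;
            }
        }
    }
    Ok(())
}
```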
## Configuration

The sample [`config.toml`](config.toml) file demonstrates the
configuration available to the relay. This file is optional, but may
be mounted into a docker container like so:

```console
$ docker run -it -p 7000:8080 \
  --mount src=$(pwd)/config.toml,target=/usr/src/app/config.toml,type=bind \
  --mount src=$(pwd)/data,target=/usr/src/app/db,type=bind \
  nostr-rs-relay
```

Options include rate-limiting, event size limits, and network address
settings.

## Reverse Proxy Configuration

For examples of putting the relay behind a reverse proxy (for TLS
termination, load balancing, and other features), see [Reverse
Proxy](reverse-proxy.md).

## Dev Channel

For development discussions, please feel free to use the [sourcehut
mailing list](https://lists.sr.ht/~gheartsfield/nostr-rs-relay-devel).
Or, drop by the [Nostr Telegram Channel](https://t.me/nostr_protocol).

To chat about `nostr-rs-relay` on `nostr` itself, visit our channel on [anigma](https://anigma.io/) or another client that supports [NIP-28](https://github.com/nostr-protocol/nips/blob/master/28.md) chats:
* `2ad246a094fee48c6e455dd13d759d5f41b5a233120f5719d81ebc1935075194`

License
---
This project is MIT licensed.
config.toml (new file, 136 lines)
@@ -0,0 +1,136 @@
# Nostr-rs-relay configuration

[info]
# The advertised URL for the Nostr websocket.
relay_url = "wss://nostr.example.com/"

# Relay information for clients. Put your unique server name here.
name = "nostr-rs-relay"

# Description
description = "A newly created nostr-rs-relay.\n\nCustomize this with your own info."

# Administrative contact pubkey
#pubkey = "0c2d168a4ae8ca58c9f1ab237b5df682599c6c7ab74307ea8b05684b60405d41"

# Administrative contact URI
#contact = "mailto:contact@example.com"

[diagnostics]
# Enable tokio tracing (for use with tokio-console)
#tracing = true

[database]
# Directory for SQLite files. Defaults to the current directory. Can
# also be specified (and overridden) with the "--db dirname" command
# line option.
data_directory = "."

# Use an in-memory database instead of 'nostr.db'.
# Caution; this will not survive a process restart!
#in_memory = false

# Database connection pool settings for subscribers:

# Minimum number of SQLite reader connections
#min_conn = 4

# Maximum number of SQLite reader connections. Recommend setting this
# to approx the number of cores.
#max_conn = 8

[network]
# Bind to this network address
address = "0.0.0.0"

# Listen on this port
port = 8080

# If present, read this HTTP header for logging client IP addresses.
# Examples for common proxies, cloudflare:
#remote_ip_header = "x-forwarded-for"
#remote_ip_header = "cf-connecting-ip"

# Websocket ping interval in seconds, defaults to 5 minutes
#ping_interval = 300

[options]
# Reject events that have timestamps greater than this many seconds in
# the future. Recommended to reject anything greater than 30 minutes
# from the current time, but the default is to allow any date.
reject_future_seconds = 1800

[limits]
# Limit events created per second, averaged over one minute. Must be
# an integer. If not set (or set to 0), defaults to unlimited. Note:
# this is for the server as a whole, not per-connection.
# messages_per_sec = 0

# Limit client subscriptions created per second, averaged over one
# minute. Must be an integer. If not set (or set to 0), defaults to
# unlimited.
#subscriptions_per_min = 0

# UNIMPLEMENTED...
# Limit how many concurrent database connections a client can have.
# This prevents a single client from starting too many expensive
# database queries. Must be an integer. If not set (or set to 0),
# defaults to unlimited (subject to subscription limits).
#db_conns_per_client = 0

# Limit blocking threads used for database connections. Defaults to 16.
#max_blocking_threads = 16

# Limit the maximum size of an EVENT message. Defaults to 128 KB.
# Set to 0 for unlimited.
#max_event_bytes = 131072

# Maximum WebSocket message in bytes. Defaults to 128 KB.
#max_ws_message_bytes = 131072

# Maximum WebSocket frame size in bytes. Defaults to 128 KB.
#max_ws_frame_bytes = 131072

# Broadcast buffer size, in number of events. This prevents slow
# readers from consuming memory.
#broadcast_buffer = 16384

# Event persistence buffer size, in number of events. This provides
# backpressure to senders if writes are slow.
#event_persist_buffer = 4096

[authorization]
# Pubkey addresses in this array are whitelisted for event publishing.
# Only valid events by these authors will be accepted, if the variable
# is set.
#pubkey_whitelist = [
# "35d26e4690cbe1a898af61cc3515661eb5fa763b57bd0b42e45099c8b32fd50f",
# "887645fef0ce0c3c1218d2f5d8e6132a19304cdc57cd20281d082f38cfea0072",
#]

[verified_users]
# NIP-05 verification of users. Can be "enabled" to require NIP-05
# metadata for event authors, "passive" to perform validation but
# never block publishing, or "disabled" to do nothing.
#mode = "disabled"

# Domain names that will be prevented from publishing events.
#domain_blacklist = ["wellorder.net"]

# Domain names that are allowed to publish events. If defined, only
# events from NIP-05 verified authors at these domains are persisted.
#domain_whitelist = ["example.com"]

# Consider a pubkey "verified" if we have a successful validation
# from the NIP-05 domain within this amount of time. Note, if the
# domain provides a successful response that omits the account,
# verification is immediately revoked.
#verify_expiration = "1 week"

# How long to wait between verification attempts for a specific author.
#verify_update_frequency = "24 hours"

# How many consecutive failed checks before we give up on verifying
# this author.
#max_consecutive_failures = 20
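The human-readable durations used by `verify_expiration` and `verify_update_frequency` above are parsed with the `parse_duration` crate listed in `Cargo.toml` (see `src/config.rs` later in this diff). A minimal sketch, assuming only that crate:

```rust
// Illustrative only: shows how the parse_duration crate (already a dependency
// in Cargo.toml) turns the human-readable strings above into a
// std::time::Duration, as src/config.rs does for the verified_users settings.
use std::time::Duration;

fn main() {
    // "1 week" and "24 hours" match the commented defaults shown above
    let expiration: Duration = parse_duration::parse("1 week").expect("valid duration");
    let update: Duration = parse_duration::parse("24 hours").expect("valid duration");
    println!("verification expires after {:?}, re-check every {:?}", expiration, update);
}
```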
docs/user-verification-nip05.md (new file, 248 lines)
@@ -0,0 +1,248 @@
# Author Verification Design Document

The relay will use NIP-05 DNS-based author verification to limit which
authors can publish events to a relay. This document describes how
this feature will operate.

## Considerations

DNS-based author verification is designed to be deployed in relays that
want to prevent spam, so there should be strong protections to prevent
unauthorized authors from persisting data. This includes data needed to
verify new authors.

There should be protections in place to ensure the relay cannot be
used to spam or flood other webservers. Additionally, there should be
protections against server-side request forgery (SSRF).

## Design Overview

### Concepts

All authors are initially "unverified". Unverified authors that submit
appropriate `NIP-05` metadata events become "candidates" for
verification. A candidate author becomes verified when the relay
inspects a kind `0` metadata event for the author with a `nip05` field,
and follows the procedure in `NIP-05` to successfully associate the
author with an internet identifier.

The `NIP-05` procedure verifies an author for a fixed period of time,
configurable by the relay operator. If this "verification expiration
time" (`verify_expiration`) is exceeded without being refreshed, they
are once again unverified.

Verified authors have their status regularly and automatically updated
through scheduled polling of their verified domain; this process is
"re-verification". It is performed based on the configuration setting
`verify_update_frequency`, which defines how long the relay waits
between verification attempts (whether the result was success or
failure).

Authors may change their verification data (the internet identifier from
`NIP-05`) with a new metadata event, which then requires
re-verification. Their old verification remains valid until
expiration.

Performing candidate author verification is a best-effort activity and
may be significantly rate-limited to prevent relays being used to
attack other hosts. Candidate verification (untrusted authors) should
never impact re-verification (trusted authors).

## Operating Modes

The relay may operate in one of three modes. "Disabled" performs no
validation activities, and will never permit or deny events based on
an author's NIP-05 metadata. "Passive" performs NIP-05 validation,
but does not permit or deny events based on the validity or presence
of NIP-05 metadata. "Enabled" will require current and valid NIP-05
metadata for any events to be persisted. "Enabled" mode will
additionally consider domain whitelist/blacklist configuration data to
restrict which authors' events are persisted.

## Design Details

### Data Storage

Verification is stored in a dedicated table. This tracks:

* `nip05` identifier
* most recent verification timestamp
* most recent verification failure timestamp
* reference to the metadata event (used for tracking `created_at` and
  `pubkey`)

### Event Handling

All events are first validated to ensure the signature is valid.

Incoming events of kind _other_ than metadata (kind `0`) submitted by
clients will be evaluated as follows.

* If the event's author has a current verification, the event is
  persisted as normal.
* If the event's author has either no verification, or the
  verification is expired, the event is rejected.

If the event is a metadata event, we handle it differently.

We first determine the verification status of the event's pubkey.

* If the event author is unverified, AND the event contains a `nip05`
  key, we consider this a verification candidate.
* If the event author is unverified, AND the event does not contain a
  `nip05` key, this is not a candidate, and the event is dropped.

* If the event author is verified, AND the event contains a `nip05`
  key that is identical to the currently stored value, no special
  action is needed.
* If the event author is verified, AND the event contains a different
  `nip05` than was previously verified, with a more recent timestamp,
  we need to re-verify.
* If the event author is verified, AND the event is missing a `nip05`
  key, and the event timestamp is more recent than what was verified,
  we do nothing. The current verification will be allowed to expire.
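As a rough illustration of the metadata rules above, the decision can be reduced to a small match; the types and field names here are hypothetical stand-ins, not the relay's actual data model:

```rust
// Hypothetical sketch of the metadata-handling rules above; UserVerification
// and the field names are illustrative, not the relay's actual schema.
struct UserVerification {
    nip05: String,                  // verified internet identifier
    verified_event_created_at: u64, // created_at of the metadata event last verified
}

enum MetadataAction {
    Candidate, // unverified author supplied a nip05: attempt verification
    Drop,      // unverified author, no nip05: nothing to verify
    Reverify,  // verified author changed nip05 with a newer event
    NoAction,  // same nip05, an older event, or a removed nip05
}

fn handle_metadata(
    current: Option<&UserVerification>,
    event_nip05: Option<&str>,
    event_created_at: u64,
) -> MetadataAction {
    match (current, event_nip05) {
        (None, Some(_)) => MetadataAction::Candidate,
        (None, None) => MetadataAction::Drop,
        (Some(v), Some(new))
            if new != v.nip05.as_str() && event_created_at > v.verified_event_created_at =>
        {
            MetadataAction::Reverify
        }
        // identical nip05, an older event, or a missing nip05: leave the
        // current verification alone and let it expire naturally
        _ => MetadataAction::NoAction,
    }
}

fn main() {
    let v = UserVerification {
        nip05: "alice@example.com".into(),
        verified_event_created_at: 1_650_000_000,
    };
    assert!(matches!(
        handle_metadata(Some(&v), Some("alice@other.example"), 1_660_000_000),
        MetadataAction::Reverify
    ));
    assert!(matches!(
        handle_metadata(None, None, 1_660_000_000),
        MetadataAction::Drop
    ));
    println!("metadata handling rules behave as described");
}
```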
### Candidate Verification

When a candidate verification is requested, a rate limit will be
utilized. If the rate limit is exceeded, new candidate verification
requests will be dropped. In practice, this is implemented by a
size-limited channel that drops events that exceed a threshold.

Candidates are never persisted in the database.
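A minimal sketch of the size-limited, drop-on-overflow channel described above, using the bounded mpsc channel from `tokio` (already a dependency); the `Candidate` type and the capacity of 16 are illustrative values:

```rust
// Illustrative sketch of dropping candidate verifications when a bounded
// queue is full; the Candidate type and capacity are made up for the example.
use tokio::sync::mpsc::{channel, error::TrySendError};

#[derive(Debug)]
struct Candidate {
    pubkey: String,
    nip05: String,
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = channel::<Candidate>(16); // small, fixed capacity

    let c = Candidate { pubkey: "abcd".into(), nip05: "alice@example.com".into() };
    // try_send never blocks: if the verifier is behind and the queue is full,
    // the candidate is simply dropped, so hostile traffic cannot pile up work.
    match tx.try_send(c) {
        Ok(()) => {}
        Err(TrySendError::Full(dropped)) => eprintln!("queue full, dropping {:?}", dropped),
        Err(TrySendError::Closed(_)) => eprintln!("verifier has shut down"),
    }

    if let Some(candidate) = rx.recv().await {
        println!("verifier would now check {:?}", candidate);
    }
}
```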
### Re-Verification

Re-verification is straightforward when there has been no change to
the `nip05` key. A new request to the `nip05` domain is performed,
and if successful, the verification timestamp is updated to the
current time. If the request fails due to a timeout or server error,
the failure timestamp is updated instead.

When the `nip05` key has changed and this event is more recent, we
will create a new verification record, and delete all other records
for the same name.

Regarding creating new records vs. updating: we never update the event
reference or `nip05` identifier in a verification record. Every update
either resets the last failure or last success timestamp.

### Determining Verification Status

In determining if an event is from a verified author, the following
procedure should be used:

Join the verification table with the event table, to provide
verification data alongside the event `created_at` and `pubkey`
metadata. Find the most recent verification record for the author,
based on the `created_at` time.

Reject the record if the success timestamp is not within our
configured expiration time.

Reject records with disallowed domains, based on any whitelists or
blacklists in effect.

If a result remains, the author is treated as verified.

This does give a time window for authors transitioning their verified
status between domains. There may be a period of time in which there
are multiple valid rows in the verification table for a given author.
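The join described above might look roughly like the following `rusqlite` sketch (`rusqlite` is already a dependency); the table and column names are illustrative, not the relay's actual schema:

```rust
// Illustrative only: a hypothetical stand-in for the verification/event join
// described above, using the rusqlite crate from Cargo.toml. Returns the
// newest verification for an author, if any exists.
use rusqlite::{params, Connection, OptionalExtension, Result};

fn latest_verification(conn: &Connection, author: &str) -> Result<Option<(String, i64, i64)>> {
    conn.query_row(
        "SELECT v.name, v.verified_at, e.created_at
           FROM user_verification v
           JOIN event e ON e.id = v.metadata_event
          WHERE e.pubkey = ?1
          ORDER BY e.created_at DESC
          LIMIT 1",
        params![author],
        |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?)),
    )
    .optional()
}

fn main() -> Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE event (id TEXT PRIMARY KEY, pubkey TEXT, created_at INTEGER);
         CREATE TABLE user_verification (name TEXT, verified_at INTEGER, metadata_event TEXT);",
    )?;
    // with no rows inserted, the query simply returns None
    println!("{:?}", latest_verification(&conn, "abcd")?);
    Ok(())
}
```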
### Cleaning Up Inactive Verifications

After an author verification has expired, we will continue to check for
it to become valid again. After a configurable number of attempts, we
should simply forget it, and reclaim the space.

### Addition of Domain Whitelist/Blacklist

A set of whitelisted or blacklisted domains may be provided. If both
are provided, only the whitelist is used. In this context, domains
are either "allowed" (present on a whitelist and NOT present on a
blacklist), or "denied" (NOT present on a whitelist and present on a
blacklist).

The processes outlined so far are modified in the presence of these
options:

* Only authors with allowed domains can become candidates for
  verification.
* Verification status queries additionally filter out any denied
  domains.
* Re-verification processes only proceed with allowed domains.

### Integration

We have an existing database writer thread, which receives events and
attempts to persist them to disk. Once validated and persisted, these
events are broadcast to all subscribers.

When verification is enabled, the writer must check to ensure a valid,
unexpired verification record exists for the author. All metadata
events (regardless of verification status) are forwarded to a verifier
module. If the verifier determines a new verification record is
needed, it is also responsible for persisting and broadcasting the
event, just as the database writer would have done.

## Threat Scenarios

Some of these mitigations are fully implemented, others are documented
simply to demonstrate a mitigation is possible.

### Domain Spamming

*Threat*: An author with a high volume of events creates a metadata event
with a bogus domain, causing the relay to generate significant
unwanted traffic to a target.

*Mitigation*: Rate limiting for all candidate verification will limit
external requests to a reasonable amount. Currently, this is a simple
delay that slows down the HTTP task.

### Denial of Service for Legitimate Authors

*Threat*: An author with a high volume of events creates a metadata event
with a domain that is invalid for them, _but which is used by other
legitimate authors_. This triggers rate-limiting against the legitimate
domain, and blocks authors from updating their own metadata.

*Mitigation*: Rate limiting should only apply to candidates, so any
existing verified authors have priority for re-verification. New
authors will be affected, as we cannot distinguish between the threat
and a legitimate author. _(Unimplemented)_

### Denial of Service by Consuming Storage

*Threat*: An author creates a high volume of random metadata events with
unique domains, in order to cause us to store large amounts of data
for to-be-verified authors.

*Mitigation*: No data is stored for candidate authors. This makes it
harder for new authors to become verified, but is effective at
preventing this attack.

### Metadata Replay for Verified Author

*Threat*: Attacker replays an out-of-date metadata event for an author, to
cause a verification to fail.

*Mitigation*: New metadata events have their signed timestamp compared
against the signed timestamp of the event that has most recently
verified them. If the metadata event is older, it is discarded.

### Server-Side Request Forgery via Metadata

*Threat*: Attacker includes malicious data in the `nip05` event, which
is used to generate HTTP requests against potentially internal
resources, either leaking data or invoking webservices beyond their
own privileges.

*Mitigation*: Consider detecting and dropping when the `nip05` field
is an IP address. Allow the relay operator to utilize the `blacklist`
or `whitelist` to constrain hosts that will be contacted. Most
importantly, the verification process is hardcoded to only make
requests to a known URL path
(`.well-known/nostr.json?name=<LOCAL_NAME>`). The `<LOCAL_NAME>`
component is restricted to a basic ASCII subset (preventing additional
URL components).
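As an illustration of the `<LOCAL_NAME>` restriction mentioned in the last mitigation, a check against a conservative ASCII character class could look like the sketch below, using the `regex` crate from `Cargo.toml`; the exact character class the relay permits is an assumption here:

```rust
// Illustrative sketch of the "basic ASCII subset" restriction described above,
// using the regex crate from Cargo.toml. The permitted character class is an
// assumption; NIP-05 local names are commonly limited to a-z, 0-9, -, _ and .
use regex::Regex;

fn is_safe_local_name(name: &str) -> bool {
    // compile once in real code (e.g. with lazy_static, also a dependency)
    let re = Regex::new(r"^[a-z0-9\-_.]+$").expect("static pattern compiles");
    re.is_match(name)
}

fn main() {
    assert!(is_safe_local_name("alice"));
    // path traversal or query injection attempts are rejected outright
    assert!(!is_safe_local_name("../admin"));
    assert!(!is_safe_local_name("alice?x=1"));
    println!("local name checks passed");
}
```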
reverse-proxy.md (new file, 92 lines)
@@ -0,0 +1,92 @@
# Reverse Proxy Setup Guide

It is recommended to run `nostr-rs-relay` behind a reverse proxy such
as `haproxy` or `nginx` to provide TLS termination. Simple examples
of `haproxy` and `nginx` configurations are documented here.

## Minimal HAProxy Configuration

Assumptions:

* HAProxy version is `2.4.10` or greater (older versions not tested).
* Hostname for the relay is `relay.example.com`.
* Your relay should be available over wss://relay.example.com
* Your (NIP-11) relay info page should be available on https://relay.example.com
* SSL certificate is located in `/etc/certs/example.com.pem`.
* Relay is running on port 8080.
* Limit connections to 400 concurrent.
* HSTS (HTTP Strict Transport Security) is desired.
* Only TLS 1.2 or greater is allowed.

```
global
    ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
    ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets

frontend fe_prod
    mode http
    bind :443 ssl crt /etc/certs/example.com.pem alpn h2,http/1.1
    bind :80
    http-request set-header X-Forwarded-Proto https if { ssl_fc }
    redirect scheme https code 301 if !{ ssl_fc }
    acl host_relay hdr(host) -i relay.example.com
    use_backend relay if host_relay
    # HSTS (1 year)
    http-response set-header Strict-Transport-Security max-age=31536000

backend relay
    mode http
    timeout connect 5s
    timeout client 50s
    timeout server 50s
    timeout tunnel 1h
    timeout client-fin 30s
    option tcp-check
    default-server maxconn 400 check inter 20s fastinter 1s
    server relay 127.0.0.1:8080
```

### HAProxy Notes

You may experience WebSocket connection problems with Firefox if
HTTP/2 is enabled, for older versions of HAProxy (2.3.x). Either
disable HTTP/2 (`h2`), or upgrade HAProxy.

## Bare-bones Nginx Configuration

Assumptions:

* `Nginx` version is `1.18.0` (other versions not tested).
* Hostname for the relay is `relay.example.com`.
* SSL certificate and key are located at `/etc/letsencrypt/live/relay.example.com/`.
* Relay is running on port `8080`.

```
http {
    server {
        listen 443 ssl;
        server_name relay.example.com;
        ssl_certificate /etc/letsencrypt/live/relay.example.com/fullchain.pem;
        ssl_certificate_key /etc/letsencrypt/live/relay.example.com/privkey.pem;
        ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
        ssl_ciphers HIGH:!aNULL:!MD5;
        keepalive_timeout 70;

        location / {
            proxy_pass http://localhost:8080;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "Upgrade";
            proxy_set_header Host $host;
        }
    }
}
```

### Nginx Notes

The above configuration was tested with `nginx` `1.18.0` on `Ubuntu 20.04`.

For help installing `nginx` on `Ubuntu`, see [this guide](https://www.digitalocean.com/community/tutorials/how-to-install-nginx-on-ubuntu-20-04).

For guidance on using `letsencrypt` to obtain a cert on `Ubuntu`, including an `nginx` plugin, see [this post](https://www.digitalocean.com/community/tutorials/how-to-secure-nginx-with-let-s-encrypt-on-ubuntu-20-04).
@@ -1 +1,4 @@
edition = "2018"
edition = "2021"
#max_width = 140
#chain_width = 100
#fn_call_width = 100
src/close.rs (diff: 14 lines)
@@ -1,9 +1,11 @@
//! Subscription close request parsing
//!
//! Representation and parsing of `CLOSE` messages sent from clients.
use crate::error::{Error, Result};
use serde::{Deserialize, Serialize};

/// Close command in network format
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct CloseCmd {
    /// Protocol command, expected to always be "CLOSE".
    cmd: String,
@@ -11,8 +13,8 @@ pub struct CloseCmd {
    id: String,
}

/// Close command parsed
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
/// Identifier of the subscription to be closed.
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct Close {
    /// The subscription identifier being closed.
    pub id: String,
@@ -21,10 +23,10 @@ pub struct Close {
impl From<CloseCmd> for Result<Close> {
    fn from(cc: CloseCmd) -> Result<Close> {
        // ensure command is correct
        if cc.cmd != "CLOSE" {
            Err(Error::CommandUnknownError)
        } else {
        if cc.cmd == "CLOSE" {
            Ok(Close { id: cc.id })
        } else {
            Err(Error::CommandUnknownError)
        }
        }
    }
}
src/config.rs (new file, 253 lines)
@@ -0,0 +1,253 @@
//! Configuration file and settings management
use config::{Config, ConfigError, File};
use serde::{Deserialize, Serialize};
use std::time::Duration;
use tracing::warn;

#[derive(Debug, Serialize, Deserialize, Clone)]
#[allow(unused)]
pub struct Info {
    pub relay_url: Option<String>,
    pub name: Option<String>,
    pub description: Option<String>,
    pub pubkey: Option<String>,
    pub contact: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Database {
    pub data_directory: String,
    pub in_memory: bool,
    pub min_conn: u32,
    pub max_conn: u32,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Network {
    pub port: u16,
    pub address: String,
    pub remote_ip_header: Option<String>, // retrieve client IP from this HTTP header if present
    pub ping_interval_seconds: u32,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Options {
    pub reject_future_seconds: Option<usize>, // if defined, reject any events with a timestamp more than X seconds in the future
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Retention {
    // TODO: implement
    pub max_events: Option<usize>,   // max events
    pub max_bytes: Option<usize>,    // max size
    pub persist_days: Option<usize>, // oldest message
    pub whitelist_addresses: Option<Vec<String>>, // whitelisted addresses (never delete)
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Limits {
    pub messages_per_sec: Option<u32>, // Artificially slow down event writing to limit disk consumption (averaged over 1 minute)
    pub subscriptions_per_min: Option<u32>, // Artificially slow down request (db query) creation to prevent abuse (averaged over 1 minute)
    pub db_conns_per_client: Option<u32>, // How many concurrent database queries (not subscriptions) may a client have?
    pub max_blocking_threads: usize,
    pub max_event_bytes: Option<usize>, // Maximum size of an EVENT message
    pub max_ws_message_bytes: Option<usize>,
    pub max_ws_frame_bytes: Option<usize>,
    pub broadcast_buffer: usize, // events to buffer for subscribers (prevents slow readers from consuming memory)
    pub event_persist_buffer: usize, // events to buffer for database commits (block senders if database writes are too slow)
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Authorization {
    pub pubkey_whitelist: Option<Vec<String>>, // If present, only allow these pubkeys to publish events
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Diagnostics {
    pub tracing: bool, // enables tokio console-subscriber
}

#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Copy)]
#[serde(rename_all = "lowercase")]
pub enum VerifiedUsersMode {
    Enabled,
    Passive,
    Disabled,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct VerifiedUsers {
    pub mode: VerifiedUsersMode, // Mode of operation: "enabled" (enforce) or "passive" (check only). If none, this is simply disabled.
    pub domain_whitelist: Option<Vec<String>>, // If present, only allow verified users from these domains can publish events
    pub domain_blacklist: Option<Vec<String>>, // If present, allow all verified users from any domain except these
    pub verify_expiration: Option<String>, // how long a verification is cached for before no longer being used
    pub verify_update_frequency: Option<String>, // how often to attempt to update verification
    pub verify_expiration_duration: Option<Duration>, // internal result of parsing verify_expiration
    pub verify_update_frequency_duration: Option<Duration>, // internal result of parsing verify_update_frequency
    pub max_consecutive_failures: usize, // maximum number of verification failures in a row, before ceasing future checks
}

impl VerifiedUsers {
    pub fn init(&mut self) {
        self.verify_expiration_duration = self.verify_expiration_duration();
        self.verify_update_frequency_duration = self.verify_update_duration();
    }

    #[must_use]
    pub fn is_enabled(&self) -> bool {
        self.mode == VerifiedUsersMode::Enabled
    }

    #[must_use]
    pub fn is_active(&self) -> bool {
        self.mode == VerifiedUsersMode::Enabled || self.mode == VerifiedUsersMode::Passive
    }

    #[must_use]
    pub fn is_passive(&self) -> bool {
        self.mode == VerifiedUsersMode::Passive
    }

    #[must_use]
    pub fn verify_expiration_duration(&self) -> Option<Duration> {
        self.verify_expiration
            .as_ref()
            .and_then(|x| parse_duration::parse(x).ok())
    }

    #[must_use]
    pub fn verify_update_duration(&self) -> Option<Duration> {
        self.verify_update_frequency
            .as_ref()
            .and_then(|x| parse_duration::parse(x).ok())
    }

    #[must_use]
    pub fn is_valid(&self) -> bool {
        self.verify_expiration_duration().is_some() && self.verify_update_duration().is_some()
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Settings {
    pub info: Info,
    pub diagnostics: Diagnostics,
    pub database: Database,
    pub network: Network,
    pub limits: Limits,
    pub authorization: Authorization,
    pub verified_users: VerifiedUsers,
    pub retention: Retention,
    pub options: Options,
}

impl Settings {
    #[must_use]
    pub fn new() -> Self {
        let default_settings = Self::default();
        // attempt to construct settings with file
        let from_file = Self::new_from_default(&default_settings);
        match from_file {
            Ok(f) => f,
            Err(e) => {
                warn!("Error reading config file ({:?})", e);
                default_settings
            }
        }
    }

    fn new_from_default(default: &Settings) -> Result<Self, ConfigError> {
        let builder = Config::builder();
        let config: Config = builder
            // use defaults
            .add_source(Config::try_from(default)?)
            // override with file contents
            .add_source(File::with_name("config.toml"))
            .build()?;
        let mut settings: Settings = config.try_deserialize()?;
        // ensure connection pool size is logical
        assert!(
            settings.database.min_conn <= settings.database.max_conn,
            "Database min_conn setting ({}) cannot exceed max_conn ({})",
            settings.database.min_conn,
            settings.database.max_conn
        );
        // ensure durations parse
        assert!(
            settings.verified_users.is_valid(),
            "VerifiedUsers time settings could not be parsed"
        );
        // initialize durations for verified users
        settings.verified_users.init();
        Ok(settings)
    }
}

impl Default for Settings {
    fn default() -> Self {
        Settings {
            info: Info {
                relay_url: None,
                name: Some("Unnamed nostr-rs-relay".to_owned()),
                description: None,
                pubkey: None,
                contact: None,
            },
            diagnostics: Diagnostics { tracing: false },
            database: Database {
                data_directory: ".".to_owned(),
                in_memory: false,
                min_conn: 4,
                max_conn: 8,
            },
            network: Network {
                port: 8080,
                ping_interval_seconds: 300,
                address: "0.0.0.0".to_owned(),
                remote_ip_header: None,
            },
            limits: Limits {
                messages_per_sec: None,
                subscriptions_per_min: None,
                db_conns_per_client: None,
                max_blocking_threads: 16,
                max_event_bytes: Some(2 << 17),      // 128K
                max_ws_message_bytes: Some(2 << 17), // 128K
                max_ws_frame_bytes: Some(2 << 17),   // 128K
                broadcast_buffer: 16384,
                event_persist_buffer: 4096,
            },
            authorization: Authorization {
                pubkey_whitelist: None, // Allow any address to publish
            },
            verified_users: VerifiedUsers {
                mode: VerifiedUsersMode::Disabled,
                domain_whitelist: None,
                domain_blacklist: None,
                verify_expiration: Some("1 week".to_owned()),
                verify_update_frequency: Some("1 day".to_owned()),
                verify_expiration_duration: None,
                verify_update_frequency_duration: None,
                max_consecutive_failures: 20,
            },
            retention: Retention {
                max_events: None,          // max events
                max_bytes: None,           // max size
                persist_days: None,        // oldest message
                whitelist_addresses: None, // whitelisted addresses (never delete)
            },
            options: Options {
                reject_future_seconds: None, // Reject events in the future if defined
            },
        }
    }
}
src/conn.rs (diff: 63 lines)
@@ -2,11 +2,10 @@
use crate::close::Close;
use crate::error::Error;
use crate::error::Result;
use crate::event::Event;

use crate::subscription::Subscription;
use log::*;
use std::collections::HashMap;
use tracing::{debug, trace};
use uuid::Uuid;

/// A subscription identifier has a maximum length
@@ -14,6 +13,8 @@ const MAX_SUBSCRIPTION_ID_LEN: usize = 256;

/// State for a client connection
pub struct ClientConn {
    /// Client IP (either from socket, or configured proxy header
    client_ip: String,
    /// Unique client identifier generated at connection time
    client_id: Uuid,
    /// The current set of active client subscriptions
@@ -24,46 +25,56 @@ pub struct ClientConn {

impl Default for ClientConn {
    fn default() -> Self {
        Self::new()
        Self::new("unknown".to_owned())
    }
}

impl ClientConn {
    /// Create a new, empty connection state.
    pub fn new() -> Self {
    #[must_use]
    pub fn new(client_ip: String) -> Self {
        let client_id = Uuid::new_v4();
        ClientConn {
            client_ip,
            client_id,
            subscriptions: HashMap::new(),
            max_subs: 32,
        }
    }

    pub fn subscriptions(&self) -> &HashMap<String, Subscription> {
        &self.subscriptions
    }

    /// Check if the given subscription already exists
    pub fn has_subscription(&self, sub: &Subscription) -> bool {
        self.subscriptions.values().any(|x| x == sub)
    }

    /// Get a short prefix of the client's unique identifier, suitable
    /// for logging.
    #[must_use]
    pub fn get_client_prefix(&self) -> String {
        self.client_id.to_string().chars().take(8).collect()
    }

    /// Find all matching subscriptions.
    pub fn get_matching_subscriptions(&self, e: &Event) -> Vec<&str> {
        let mut v: Vec<&str> = vec![];
        for (id, sub) in self.subscriptions.iter() {
            if sub.interested_in_event(e) {
                v.push(id);
            }
        }
        return v;
    #[must_use]
    pub fn ip(&self) -> &str {
        &self.client_ip
    }

    /// Add a new subscription for this connection.
    /// # Errors
    ///
    /// Will return `Err` if the client has too many subscriptions, or
    /// if the provided name is excessively long.
    pub fn subscribe(&mut self, s: Subscription) -> Result<()> {
        let k = s.get_id();
        let sub_id_len = k.len();
        // prevent arbitrarily long subscription identifiers from
        // being used.
        if sub_id_len > MAX_SUBSCRIPTION_ID_LEN {
            info!(
            debug!(
                "ignoring sub request with excessive length: ({})",
                sub_id_len
            );
@@ -72,8 +83,12 @@ impl ClientConn {
        // check if an existing subscription exists, and replace if so
        if self.subscriptions.contains_key(&k) {
            self.subscriptions.remove(&k);
            self.subscriptions.insert(k, s);
            debug!("replaced existing subscription");
            self.subscriptions.insert(k, s.clone());
            trace!(
                "replaced existing subscription (cid: {}, sub: {:?})",
                self.get_client_prefix(),
                s.get_id()
            );
            return Ok(());
        }

@@ -83,20 +98,22 @@ impl ClientConn {
        }
        // add subscription
        self.subscriptions.insert(k, s);
        debug!(
            "registered new subscription, currently have {} active subs",
            self.subscriptions.len()
        trace!(
            "registered new subscription, currently have {} active subs (cid: {})",
            self.subscriptions.len(),
            self.get_client_prefix(),
        );
        Ok(())
    }

    /// Remove the subscription for this connection.
    pub fn unsubscribe(&mut self, c: Close) {
    pub fn unsubscribe(&mut self, c: &Close) {
        // TODO: return notice if subscription did not exist.
        self.subscriptions.remove(&c.id);
        debug!(
            "removed subscription, currently have {} active subs",
            self.subscriptions.len()
        trace!(
            "removed subscription, currently have {} active subs (cid: {})",
            self.subscriptions.len(),
            self.get_client_prefix(),
        );
    }
}
403
src/delegation.rs
Normal file
403
src/delegation.rs
Normal file
@@ -0,0 +1,403 @@
|
||||
//! Event parsing and validation
|
||||
use crate::error::Error;
|
||||
use crate::error::Result;
|
||||
use crate::event::Event;
|
||||
use bitcoin_hashes::{sha256, Hash};
|
||||
use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
use secp256k1::{schnorr, Secp256k1, VerifyOnly, XOnlyPublicKey};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::str::FromStr;
|
||||
use tracing::{debug, info};
|
||||
|
||||
// This handles everything related to delegation, in particular the
|
||||
// condition/rune parsing and logic.
|
||||
|
||||
// Conditions are poorly specified, so we will implement the minimum
|
||||
// necessary for now.
|
||||
|
||||
// fields MUST be either "kind" or "created_at".
|
||||
// operators supported are ">", "<", "=", "!".
|
||||
// no operations on 'content' are supported.
|
||||
|
||||
// this allows constraints for:
|
||||
// valid date ranges (valid from X->Y dates).
|
||||
// specific kinds (publish kind=1,5)
|
||||
// kind ranges (publish ephemeral events, kind>19999&kind<30001)
|
||||
|
||||
// for more complex scenarios (allow delegatee to publish ephemeral
|
||||
// AND replacement events), it may be necessary to generate and use
|
||||
// different condition strings, since we do not support grouping or
|
||||
// "OR" logic.
|
||||
|
||||
lazy_static! {
|
||||
/// Secp256k1 verification instance.
|
||||
pub static ref SECP: Secp256k1<VerifyOnly> = Secp256k1::verification_only();
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
|
||||
pub enum Field {
|
||||
Kind,
|
||||
CreatedAt,
|
||||
}
|
||||
|
||||
impl FromStr for Field {
|
||||
type Err = Error;
|
||||
fn from_str(value: &str) -> Result<Self, Self::Err> {
|
||||
if value == "kind" {
|
||||
Ok(Field::Kind)
|
||||
} else if value == "created_at" {
|
||||
Ok(Field::CreatedAt)
|
||||
} else {
|
||||
Err(Error::DelegationParseError)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
|
||||
pub enum Operator {
|
||||
LessThan,
|
||||
GreaterThan,
|
||||
Equals,
|
||||
NotEquals,
|
||||
}
|
||||
impl FromStr for Operator {
|
||||
type Err = Error;
|
||||
fn from_str(value: &str) -> Result<Self, Self::Err> {
|
||||
if value == "<" {
|
||||
Ok(Operator::LessThan)
|
||||
} else if value == ">" {
|
||||
Ok(Operator::GreaterThan)
|
||||
} else if value == "=" {
|
||||
Ok(Operator::Equals)
|
||||
} else if value == "!" {
|
||||
Ok(Operator::NotEquals)
|
||||
} else {
|
||||
Err(Error::DelegationParseError)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
|
||||
pub struct ConditionQuery {
|
||||
pub conditions: Vec<Condition>,
|
||||
}
|
||||
|
||||
impl ConditionQuery {
|
||||
pub fn allows_event(&self, event: &Event) -> bool {
|
||||
// check each condition, to ensure that the event complies
|
||||
// with the restriction.
|
||||
for c in &self.conditions {
|
||||
if !c.allows_event(event) {
|
||||
// any failing conditions invalidates the delegation
|
||||
// on this event
|
||||
return false;
|
||||
}
|
||||
}
|
||||
// delegation was permitted unconditionally, or all conditions
|
||||
// were true
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that the delegator approved the delegation; return a ConditionQuery if so.
|
||||
pub fn validate_delegation(
|
||||
delegator: &str,
|
||||
delegatee: &str,
|
||||
cond_query: &str,
|
||||
sigstr: &str,
|
||||
) -> Option<ConditionQuery> {
|
||||
// form the token
|
||||
let tok = format!("nostr:delegation:{}:{}", delegatee, cond_query);
|
||||
// form SHA256 hash
|
||||
let digest: sha256::Hash = sha256::Hash::hash(tok.as_bytes());
|
||||
let sig = schnorr::Signature::from_str(sigstr).unwrap();
|
||||
if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) {
|
||||
if let Ok(pubkey) = XOnlyPublicKey::from_str(delegator) {
|
||||
let verify = SECP.verify_schnorr(&sig, &msg, &pubkey);
|
||||
if verify.is_ok() {
|
||||
// return the parsed condition query
|
||||
cond_query.parse::<ConditionQuery>().ok()
|
||||
} else {
|
||||
debug!("client sent an delegation signature that did not validate");
|
||||
None
|
||||
}
|
||||
} else {
|
||||
debug!("client sent malformed delegation pubkey");
|
||||
None
|
||||
}
|
||||
} else {
|
||||
info!("error converting delegation digest to secp256k1 message");
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Parsed delegation condition
|
||||
/// see https://github.com/nostr-protocol/nips/pull/28#pullrequestreview-1084903800
|
||||
/// An example complex condition would be: kind=1,2,3&created_at<1665265999
|
||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
|
||||
pub struct Condition {
|
||||
pub field: Field,
|
||||
pub operator: Operator,
|
||||
pub values: Vec<u64>,
|
||||
}
|
||||
|
||||
impl Condition {
|
||||
/// Check if this condition allows the given event to be delegated
|
||||
pub fn allows_event(&self, event: &Event) -> bool {
|
||||
// determine what the right-hand side of the operator is
|
||||
let resolved_field = match &self.field {
|
||||
Field::Kind => event.kind,
|
||||
Field::CreatedAt => event.created_at,
|
||||
};
|
||||
match &self.operator {
|
||||
Operator::LessThan => {
|
||||
// the less-than operator is only valid for single values.
|
||||
if self.values.len() == 1 {
|
||||
if let Some(v) = self.values.first() {
|
||||
return resolved_field < *v;
|
||||
}
|
||||
}
|
||||
}
|
||||
Operator::GreaterThan => {
|
||||
// the greater-than operator is only valid for single values.
|
||||
if self.values.len() == 1 {
|
||||
if let Some(v) = self.values.first() {
|
||||
return resolved_field > *v;
|
||||
}
|
||||
}
|
||||
}
|
||||
Operator::Equals => {
|
||||
// equals is interpreted as "must be equal to at least one provided value"
|
||||
return self.values.iter().any(|&x| resolved_field == x);
|
||||
}
|
||||
Operator::NotEquals => {
|
||||
// not-equals is interpreted as "must not be equal to any provided value"
|
||||
// this is the one case where an empty list of values could be allowed, even though it is a pointless restriction.
|
||||
return self.values.iter().all(|&x| resolved_field != x);
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
fn str_to_condition(cs: &str) -> Option<Condition> {
|
||||
// a condition is a string (alphanum+underscore), an operator (<>=!), and values (num+comma)
|
||||
lazy_static! {
|
||||
static ref RE: Regex = Regex::new("([[:word:]]+)([<>=!]+)([,[[:digit:]]]*)").unwrap();
|
||||
}
|
||||
// match against the regex
|
||||
let caps = RE.captures(cs)?;
|
||||
let field = caps.get(1)?.as_str().parse::<Field>().ok()?;
|
||||
let operator = caps.get(2)?.as_str().parse::<Operator>().ok()?;
|
||||
// values are just comma separated numbers, but all must be parsed
|
||||
let rawvals = caps.get(3)?.as_str();
|
||||
let values = rawvals
|
||||
.split_terminator(',')
|
||||
.map(|n| n.parse::<u64>().ok())
|
||||
.collect::<Option<Vec<_>>>()?;
|
||||
// convert field string into Field
|
||||
Some(Condition {
|
||||
field,
|
||||
operator,
|
||||
values,
|
||||
})
|
||||
}
|
||||
|
||||
/// Parse a condition query from a string slice
|
||||
impl FromStr for ConditionQuery {
|
||||
type Err = Error;
|
||||
fn from_str(value: &str) -> Result<Self, Self::Err> {
|
||||
// split the string with '&'
|
||||
let mut conditions = vec![];
|
||||
let condstrs = value.split_terminator('&');
|
||||
// parse each individual condition
|
||||
for c in condstrs {
|
||||
conditions.push(str_to_condition(c).ok_or(Error::DelegationParseError)?);
|
||||
}
|
||||
Ok(ConditionQuery { conditions })
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
// parse condition strings
|
||||
#[test]
|
||||
fn parse_empty() -> Result<()> {
|
||||
// given an empty condition query, produce an empty vector
|
||||
let empty_cq = ConditionQuery { conditions: vec![] };
|
||||
let parsed = "".parse::<ConditionQuery>()?;
|
||||
assert_eq!(parsed, empty_cq);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// parse field 'kind'
|
||||
#[test]
|
||||
fn test_kind_field_parse() -> Result<()> {
|
||||
let field = "kind".parse::<Field>()?;
|
||||
assert_eq!(field, Field::Kind);
|
||||
Ok(())
|
||||
}
|
||||
// parse field 'created_at'
|
||||
#[test]
|
||||
fn test_created_at_field_parse() -> Result<()> {
|
||||
let field = "created_at".parse::<Field>()?;
|
||||
assert_eq!(field, Field::CreatedAt);
|
||||
Ok(())
|
||||
}
|
||||
// parse unknown field
|
||||
#[test]
|
||||
fn unknown_field_parse() {
|
||||
let field = "unk".parse::<Field>();
|
||||
assert!(field.is_err());
|
||||
}
|
||||
|
||||
// parse a full conditional query with an empty array
|
||||
#[test]
|
||||
fn parse_kind_equals_empty() -> Result<()> {
|
||||
// a kind condition with an empty value list should still parse
|
||||
let kind_cq = ConditionQuery {
|
||||
conditions: vec![Condition {
|
||||
field: Field::Kind,
|
||||
operator: Operator::Equals,
|
||||
values: vec![],
|
||||
}],
|
||||
};
|
||||
let parsed = "kind=".parse::<ConditionQuery>()?;
|
||||
assert_eq!(parsed, kind_cq);
|
||||
Ok(())
|
||||
}
|
||||
// parse a full conditional query with a single value
|
||||
#[test]
|
||||
fn parse_kind_equals_singleval() -> Result<()> {
|
||||
// a kind condition with a single value should parse
|
||||
let kind_cq = ConditionQuery {
|
||||
conditions: vec![Condition {
|
||||
field: Field::Kind,
|
||||
operator: Operator::Equals,
|
||||
values: vec![1],
|
||||
}],
|
||||
};
|
||||
let parsed = "kind=1".parse::<ConditionQuery>()?;
|
||||
assert_eq!(parsed, kind_cq);
|
||||
Ok(())
|
||||
}
|
||||
// parse a full conditional query with multiple values
|
||||
#[test]
|
||||
fn parse_kind_equals_multival() -> Result<()> {
|
||||
// a kind condition with multiple values should parse
|
||||
let kind_cq = ConditionQuery {
|
||||
conditions: vec![Condition {
|
||||
field: Field::Kind,
|
||||
operator: Operator::Equals,
|
||||
values: vec![1, 2, 4],
|
||||
}],
|
||||
};
|
||||
let parsed = "kind=1,2,4".parse::<ConditionQuery>()?;
|
||||
assert_eq!(parsed, kind_cq);
|
||||
Ok(())
|
||||
}
|
||||
// parse multiple conditions
|
||||
#[test]
|
||||
fn parse_multi_conditions() -> Result<()> {
|
||||
// several '&'-joined conditions should parse into one query
|
||||
let cq = ConditionQuery {
|
||||
conditions: vec![
|
||||
Condition {
|
||||
field: Field::Kind,
|
||||
operator: Operator::GreaterThan,
|
||||
values: vec![10000],
|
||||
},
|
||||
Condition {
|
||||
field: Field::Kind,
|
||||
operator: Operator::LessThan,
|
||||
values: vec![20000],
|
||||
},
|
||||
Condition {
|
||||
field: Field::Kind,
|
||||
operator: Operator::NotEquals,
|
||||
values: vec![10001],
|
||||
},
|
||||
Condition {
|
||||
field: Field::CreatedAt,
|
||||
operator: Operator::LessThan,
|
||||
values: vec![1665867123],
|
||||
},
|
||||
],
|
||||
};
|
||||
let parsed =
|
||||
"kind>10000&kind<20000&kind!10001&created_at<1665867123".parse::<ConditionQuery>()?;
|
||||
assert_eq!(parsed, cq);
|
||||
Ok(())
|
||||
}
|
||||
// Check for condition logic on event w/ empty values
|
||||
#[test]
|
||||
fn condition_with_empty_values() {
|
||||
let mut c = Condition {
|
||||
field: Field::Kind,
|
||||
operator: Operator::GreaterThan,
|
||||
values: vec![],
|
||||
};
|
||||
let e = Event::simple_event();
|
||||
assert!(!c.allows_event(&e));
|
||||
c.operator = Operator::LessThan;
|
||||
assert!(!c.allows_event(&e));
|
||||
c.operator = Operator::Equals;
|
||||
assert!(!c.allows_event(&e));
|
||||
// Not Equals applied to an empty list *is* allowed
|
||||
// (pointless, but logically valid).
|
||||
c.operator = Operator::NotEquals;
|
||||
assert!(c.allows_event(&e));
|
||||
}
|
||||
|
||||
// Check for condition logic on event w/ single value
|
||||
#[test]
|
||||
fn condition_kind_gt_event_single() {
|
||||
let c = Condition {
|
||||
field: Field::Kind,
|
||||
operator: Operator::GreaterThan,
|
||||
values: vec![10],
|
||||
};
|
||||
let mut e = Event::simple_event();
|
||||
// kind is not greater than 10, not allowed
|
||||
e.kind = 1;
|
||||
assert!(!c.allows_event(&e));
|
||||
// kind is greater than 10, allowed
|
||||
e.kind = 100;
|
||||
assert!(c.allows_event(&e));
|
||||
// kind is 10, not allowed
|
||||
e.kind = 10;
|
||||
assert!(!c.allows_event(&e));
|
||||
}
|
||||
// Check for condition logic on event w/ multi values
|
||||
#[test]
|
||||
fn condition_with_multi_values() {
|
||||
let mut c = Condition {
|
||||
field: Field::Kind,
|
||||
operator: Operator::Equals,
|
||||
values: vec![0, 10, 20],
|
||||
};
|
||||
let mut e = Event::simple_event();
|
||||
// Allow if event kind is in list for Equals
|
||||
e.kind = 10;
|
||||
assert!(c.allows_event(&e));
|
||||
// Deny if event kind is not in list for Equals
|
||||
e.kind = 11;
|
||||
assert!(!c.allows_event(&e));
|
||||
// Deny if event kind is in list for NotEquals
|
||||
e.kind = 10;
|
||||
c.operator = Operator::NotEquals;
|
||||
assert!(!c.allows_event(&e));
|
||||
// Allow if event kind is not in list for NotEquals
|
||||
e.kind = 99;
|
||||
c.operator = Operator::NotEquals;
|
||||
assert!(c.allows_event(&e));
|
||||
// Always deny if GreaterThan/LessThan for a list
|
||||
c.operator = Operator::LessThan;
|
||||
assert!(!c.allows_event(&e));
|
||||
c.operator = Operator::GreaterThan;
|
||||
assert!(!c.allows_event(&e));
|
||||
}
|
||||
}
|
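The tests in the delegation module above exercise the grammar end to end: a condition query is one or more field/operator/values triples joined with '&', and a delegation only applies when every condition allows the event. A minimal sketch of checking an already-parsed event against a condition string (the string itself is illustrative):

use nostr_rs_relay::delegation::ConditionQuery;
use nostr_rs_relay::event::Event;

fn delegation_allows(event: &Event) -> bool {
    // same grammar as the tests: only "kind" and "created_at", operators < > = !
    let cq: ConditionQuery = "kind=1&created_at<1665867123"
        .parse()
        .expect("condition string should parse");
    // every condition must hold for the delegation to apply to this event
    cq.allows_event(event)
}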
70 src/error.rs
@@ -17,10 +17,18 @@ pub enum Error {
|
||||
ConnWriteError,
|
||||
#[error("EVENT parse failed")]
|
||||
EventParseFailed,
|
||||
#[error("ClOSE message parse failed")]
|
||||
#[error("CLOSE message parse failed")]
|
||||
CloseParseFailed,
|
||||
#[error("Event validation failed")]
|
||||
EventInvalid,
|
||||
#[error("Event invalid signature")]
|
||||
EventInvalidSignature,
|
||||
#[error("Event invalid id")]
|
||||
EventInvalidId,
|
||||
#[error("Event malformed pubkey")]
|
||||
EventMalformedPubkey,
|
||||
#[error("Event could not canonicalize")]
|
||||
EventCouldNotCanonicalize,
|
||||
#[error("Event too large")]
|
||||
EventMaxLengthError(usize),
|
||||
#[error("Subscription identifier max length exceeded")]
|
||||
SubIdMaxLengthError,
|
||||
#[error("Maximum concurrent subscription count reached")]
|
||||
@@ -34,6 +42,55 @@ pub enum Error {
|
||||
CommandUnknownError,
|
||||
#[error("SQL error")]
|
||||
SqlError(rusqlite::Error),
|
||||
#[error("Config error")]
|
||||
ConfigError(config::ConfigError),
|
||||
#[error("Data directory does not exist")]
|
||||
DatabaseDirError,
|
||||
#[error("Database Connection Pool Error")]
|
||||
DatabasePoolError(r2d2::Error),
|
||||
#[error("Custom Error : {0}")]
|
||||
CustomError(String),
|
||||
#[error("Task join error")]
|
||||
JoinError,
|
||||
#[error("Hyper Client error")]
|
||||
HyperError(hyper::Error),
|
||||
#[error("Hex encoding error")]
|
||||
HexError(hex::FromHexError),
|
||||
#[error("Delegation parse error")]
|
||||
DelegationParseError,
|
||||
#[error("Unknown/Undocumented")]
|
||||
UnknownError,
|
||||
}
|
||||
|
||||
//impl From<Box<dyn std::error::Error>> for Error {
|
||||
// fn from(e: Box<dyn std::error::Error>) -> Self {
|
||||
// Error::CustomError("error".to_owned())
|
||||
// }
|
||||
//}
|
||||
|
||||
impl From<hex::FromHexError> for Error {
|
||||
fn from(h: hex::FromHexError) -> Self {
|
||||
Error::HexError(h)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<hyper::Error> for Error {
|
||||
fn from(h: hyper::Error) -> Self {
|
||||
Error::HyperError(h)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<r2d2::Error> for Error {
|
||||
fn from(d: r2d2::Error) -> Self {
|
||||
Error::DatabasePoolError(d)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<tokio::task::JoinError> for Error {
|
||||
/// Wrap task join error
|
||||
fn from(_j: tokio::task::JoinError) -> Self {
|
||||
Error::JoinError
|
||||
}
|
||||
}
|
||||
|
||||
impl From<rusqlite::Error> for Error {
|
||||
@@ -56,3 +113,10 @@ impl From<WsError> for Error {
|
||||
Error::WebsocketError(r)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<config::ConfigError> for Error {
|
||||
/// Wrap Config error
|
||||
fn from(r: config::ConfigError) -> Self {
|
||||
Error::ConfigError(r)
|
||||
}
|
||||
}
|
||||
|
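The new error variants and From impls above are what let the rest of the crate use the ? operator on foreign error types and still return the crate-level Error. A small sketch with a hypothetical helper (the 32-byte length check is only there to show a CustomError; the helper is not part of this diff):

use nostr_rs_relay::error::{Error, Result};

fn decode_pubkey(hexkey: &str) -> Result<Vec<u8>> {
    // hex::FromHexError converts into Error::HexError via the From impl above
    let bytes = hex::decode(hexkey)?;
    if bytes.len() != 32 {
        return Err(Error::CustomError("expected a 32-byte pubkey".to_owned()));
    }
    Ok(bytes)
}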
377 src/event.rs
@@ -1,33 +1,55 @@
|
||||
//! Event parsing and validation
|
||||
use crate::delegation::validate_delegation;
|
||||
use crate::error::Error::*;
|
||||
use crate::error::Result;
|
||||
use crate::nip05;
|
||||
use crate::utils::unix_time;
|
||||
use bitcoin_hashes::{sha256, Hash};
|
||||
use log::info;
|
||||
use secp256k1::{schnorrsig, Secp256k1};
|
||||
use lazy_static::lazy_static;
|
||||
use secp256k1::{schnorr, Secp256k1, VerifyOnly, XOnlyPublicKey};
|
||||
use serde::{Deserialize, Deserializer, Serialize};
|
||||
use serde_json::value::Value;
|
||||
use serde_json::Number;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::HashSet;
|
||||
use std::str::FromStr;
|
||||
use tracing::{debug, info};
|
||||
|
||||
/// Event command in network format
|
||||
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
|
||||
lazy_static! {
|
||||
/// Secp256k1 verification instance.
|
||||
pub static ref SECP: Secp256k1<VerifyOnly> = Secp256k1::verification_only();
|
||||
}
|
||||
|
||||
/// Event command in network format.
|
||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
|
||||
pub struct EventCmd {
|
||||
cmd: String, // expecting static "EVENT"
|
||||
event: Event,
|
||||
}
|
||||
|
||||
/// Event parsed
|
||||
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
|
||||
impl EventCmd {
|
||||
pub fn event_id(&self) -> &str {
|
||||
&self.event.id
|
||||
}
|
||||
}
|
||||
|
||||
/// Parsed nostr event.
|
||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
|
||||
pub struct Event {
|
||||
pub id: String,
|
||||
pub(crate) pubkey: String,
|
||||
pub(crate) created_at: u64,
|
||||
pub(crate) kind: u64,
|
||||
pub pubkey: String,
|
||||
#[serde(skip)]
|
||||
pub delegated_by: Option<String>,
|
||||
pub created_at: u64,
|
||||
pub kind: u64,
|
||||
#[serde(deserialize_with = "tag_from_string")]
|
||||
// NOTE: array-of-arrays may need to be more general than a string container
|
||||
pub(crate) tags: Vec<Vec<String>>,
|
||||
pub(crate) content: String,
|
||||
pub(crate) sig: String,
|
||||
pub tags: Vec<Vec<String>>,
|
||||
pub content: String,
|
||||
pub sig: String,
|
||||
// Optimization for tag search, built on demand.
|
||||
#[serde(skip)]
|
||||
pub tagidx: Option<HashMap<char, HashSet<String>>>,
|
||||
}
|
||||
|
||||
/// Simple tag type for array of array of strings.
|
||||
@@ -39,7 +61,26 @@ where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let opt = Option::deserialize(deserializer)?;
|
||||
Ok(opt.unwrap_or_else(Vec::new))
|
||||
Ok(opt.unwrap_or_default())
|
||||
}
|
||||
|
||||
/// Attempt to form a single-char tag name.
|
||||
pub fn single_char_tagname(tagname: &str) -> Option<char> {
|
||||
// We return the tag character if and only if the tagname consists
|
||||
// of a single char.
|
||||
let mut tagnamechars = tagname.chars();
|
||||
let firstchar = tagnamechars.next();
|
||||
match firstchar {
|
||||
Some(_) => {
|
||||
// check second char
|
||||
if tagnamechars.next().is_none() {
|
||||
firstchar
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
None => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert network event to parsed/validated event.
|
||||
@@ -48,22 +89,160 @@ impl From<EventCmd> for Result<Event> {
|
||||
// ensure command is correct
|
||||
if ec.cmd != "EVENT" {
|
||||
Err(CommandUnknownError)
|
||||
} else if ec.event.is_valid() {
|
||||
Ok(ec.event)
|
||||
} else {
|
||||
Err(EventInvalid)
|
||||
ec.event.validate().map(|_| {
|
||||
let mut e = ec.event;
|
||||
e.build_index();
|
||||
e.update_delegation();
|
||||
e
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Event {
|
||||
#[cfg(test)]
|
||||
pub fn simple_event() -> Event {
|
||||
Event {
|
||||
id: "0".to_owned(),
|
||||
pubkey: "0".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 0,
|
||||
kind: 0,
|
||||
tags: vec![],
|
||||
content: "".to_owned(),
|
||||
sig: "0".to_owned(),
|
||||
tagidx: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_kind_metadata(&self) -> bool {
|
||||
self.kind == 0
|
||||
}
|
||||
|
||||
/// Pull a NIP-05 Name out of the event, if one exists
|
||||
pub fn get_nip05_addr(&self) -> Option<nip05::Nip05Name> {
|
||||
if self.is_kind_metadata() {
|
||||
// very quick check if we should attempt to parse this json
|
||||
if self.content.contains("\"nip05\"") {
|
||||
// Parse into JSON
|
||||
let md_parsed: Value = serde_json::from_str(&self.content).ok()?;
|
||||
let md_map = md_parsed.as_object()?;
|
||||
let nip05_str = md_map.get("nip05")?.as_str()?;
|
||||
return nip05::Nip05Name::try_from(nip05_str).ok();
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
// is this event delegated (properly)?
|
||||
// does the signature match, and are conditions valid?
|
||||
// if so, return an alternate author for the event
|
||||
pub fn delegated_author(&self) -> Option<String> {
|
||||
// is there a delegation tag?
|
||||
let delegation_tag: Vec<String> = self
|
||||
.tags
|
||||
.iter()
|
||||
.filter(|x| x.len() == 4)
|
||||
.filter(|x| x.get(0).unwrap() == "delegation")
|
||||
.take(1)
|
||||
.next()?
|
||||
.to_vec(); // get first tag
|
||||
|
||||
//let delegation_tag = self.tag_values_by_name("delegation");
|
||||
// delegation tags should have exactly 3 elements after the name (pubkey, condition, sig)
|
||||
// the event is signed by the delegatee
|
||||
let delegatee = &self.pubkey;
|
||||
// the delegation tag references the claimed delegator
|
||||
let delegator: &str = delegation_tag.get(1)?;
|
||||
let querystr: &str = delegation_tag.get(2)?;
|
||||
let sig: &str = delegation_tag.get(3)?;
|
||||
|
||||
// attempt to get a condition query; this requires the delegation to have a valid signature.
|
||||
if let Some(cond_query) = validate_delegation(delegator, delegatee, querystr, sig) {
|
||||
// The signature was valid, now we ensure the delegation
|
||||
// condition is valid for this event:
|
||||
if cond_query.allows_event(self) {
|
||||
// since this is allowed, return the delegator as the effective author
|
||||
Some(delegator.into())
|
||||
} else {
|
||||
debug!("an event failed to satisfy delegation conditions");
|
||||
None
|
||||
}
|
||||
} else {
|
||||
debug!("event had had invalid delegation signature");
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Update delegation status
|
||||
fn update_delegation(&mut self) {
|
||||
self.delegated_by = self.delegated_author();
|
||||
}
|
||||
/// Build an event tag index
|
||||
fn build_index(&mut self) {
|
||||
// if there are no tags, just leave the index as None
|
||||
if self.tags.is_empty() {
|
||||
return;
|
||||
}
|
||||
// otherwise, build an index
|
||||
let mut idx: HashMap<char, HashSet<String>> = HashMap::new();
|
||||
// iterate over tags that have at least 2 elements
|
||||
for t in self.tags.iter().filter(|x| x.len() > 1) {
|
||||
let tagname = t.get(0).unwrap();
|
||||
let tagnamechar_opt = single_char_tagname(tagname);
|
||||
if tagnamechar_opt.is_none() {
|
||||
continue;
|
||||
}
|
||||
let tagnamechar = tagnamechar_opt.unwrap();
|
||||
let tagval = t.get(1).unwrap();
|
||||
// ensure a vector exists for this tag
|
||||
idx.entry(tagnamechar).or_insert_with(HashSet::new);
|
||||
// get the tag vec and insert entry
|
||||
let idx_tag_vec = idx.get_mut(&tagnamechar).expect("could not get tag vector");
|
||||
idx_tag_vec.insert(tagval.clone());
|
||||
}
|
||||
// save the tag structure
|
||||
self.tagidx = Some(idx);
|
||||
}
|
||||
|
||||
/// Create a short event identifier, suitable for logging.
|
||||
pub fn get_event_id_prefix(&self) -> String {
|
||||
self.id.chars().take(8).collect()
|
||||
}
|
||||
pub fn get_author_prefix(&self) -> String {
|
||||
self.pubkey.chars().take(8).collect()
|
||||
}
|
||||
|
||||
/// Retrieve tag initial values across all tags matching the name
|
||||
pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
|
||||
self.tags
|
||||
.iter()
|
||||
.filter(|x| x.len() > 1)
|
||||
.filter(|x| x.get(0).unwrap() == tag_name)
|
||||
.map(|x| x.get(1).unwrap().to_owned())
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn is_valid_timestamp(&self, reject_future_seconds: Option<usize>) -> bool {
|
||||
if let Some(allowable_future) = reject_future_seconds {
|
||||
let curr_time = unix_time();
|
||||
// calculate difference, plus how far future we allow
|
||||
if curr_time + (allowable_future as u64) < self.created_at {
|
||||
let delta = self.created_at - curr_time;
|
||||
debug!(
|
||||
"event is too far in the future ({} seconds), rejecting",
|
||||
delta
|
||||
);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
/// Check if this event has a valid signature.
|
||||
fn is_valid(&self) -> bool {
|
||||
pub fn validate(&self) -> Result<()> {
|
||||
// TODO: return a Result with a reason for invalid events
|
||||
// validation is performed by:
|
||||
// * parsing JSON string into event fields
|
||||
// * create an array:
|
||||
@@ -71,8 +250,8 @@ impl Event {
|
||||
// * serialize with no spaces/newlines
|
||||
let c_opt = self.to_canonical();
|
||||
if c_opt.is_none() {
|
||||
info!("event could not be canonicalized");
|
||||
return false;
|
||||
debug!("could not canonicalize");
|
||||
return Err(EventCouldNotCanonicalize);
|
||||
}
|
||||
let c = c_opt.unwrap();
|
||||
// * compute the sha256sum.
|
||||
@@ -80,15 +259,23 @@ impl Event {
|
||||
let hex_digest = format!("{:x}", digest);
|
||||
// * ensure the id matches the computed sha256sum.
|
||||
if self.id != hex_digest {
|
||||
return false;
|
||||
debug!("event id does not match digest");
|
||||
return Err(EventInvalidId);
|
||||
}
|
||||
// * validate the message digest (sig) using the pubkey & computed sha256 message hash.
|
||||
let secp = Secp256k1::new();
|
||||
let sig = schnorrsig::Signature::from_str(&self.sig).unwrap();
|
||||
let message = secp256k1::Message::from(digest);
|
||||
let pubkey = schnorrsig::PublicKey::from_str(&self.pubkey).unwrap();
|
||||
let verify = secp.schnorrsig_verify(&sig, &message, &pubkey);
|
||||
matches!(verify, Ok(()))
|
||||
let sig = schnorr::Signature::from_str(&self.sig).unwrap();
|
||||
if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) {
|
||||
if let Ok(pubkey) = XOnlyPublicKey::from_str(&self.pubkey) {
|
||||
SECP.verify_schnorr(&sig, &msg, &pubkey)
|
||||
.map_err(|_| EventInvalidSignature)
|
||||
} else {
|
||||
debug!("client sent malformed pubkey");
|
||||
Err(EventMalformedPubkey)
|
||||
}
|
||||
} else {
|
||||
info!("error converting digest to secp256k1 message");
|
||||
Err(EventInvalidSignature)
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert event to canonical representation for signing.
|
||||
@@ -128,89 +315,67 @@ impl Event {
|
||||
serde_json::Value::Array(tags)
|
||||
}
|
||||
|
||||
/// Get a list of event tags.
|
||||
pub fn get_event_tags(&self) -> Vec<&str> {
|
||||
let mut etags = vec![];
|
||||
for t in self.tags.iter() {
|
||||
if t.len() >= 2 && t.get(0).unwrap() == "e" {
|
||||
etags.push(&t.get(1).unwrap()[..]);
|
||||
}
|
||||
/// Determine if the given tag and value set intersect with tags in this event.
|
||||
pub fn generic_tag_val_intersect(&self, tagname: char, check: &HashSet<String>) -> bool {
|
||||
match &self.tagidx {
|
||||
// check if this is indexable tagname
|
||||
Some(idx) => match idx.get(&tagname) {
|
||||
Some(valset) => {
|
||||
let common = valset.intersection(check);
|
||||
common.count() > 0
|
||||
}
|
||||
None => false,
|
||||
},
|
||||
None => false,
|
||||
}
|
||||
etags
|
||||
}
|
||||
|
||||
/// Get a list of pubkey/petname tags.
|
||||
pub fn get_pubkey_tags(&self) -> Vec<&str> {
|
||||
let mut ptags = vec![];
|
||||
for t in self.tags.iter() {
|
||||
if t.len() >= 2 && t.get(0).unwrap() == "p" {
|
||||
ptags.push(&t.get(1).unwrap()[..]);
|
||||
}
|
||||
}
|
||||
ptags
|
||||
}
|
||||
|
||||
/// Check if a given event is referenced in an event tag.
|
||||
pub fn event_tag_match(&self, eventid: &str) -> bool {
|
||||
self.get_event_tags().contains(&eventid)
|
||||
}
|
||||
|
||||
/// Check if a given event is referenced in an event tag.
|
||||
pub fn pubkey_tag_match(&self, pubkey: &str) -> bool {
|
||||
self.get_pubkey_tags().contains(&pubkey)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
fn simple_event() -> Event {
|
||||
Event {
|
||||
id: "0".to_owned(),
|
||||
pubkey: "0".to_owned(),
|
||||
created_at: 0,
|
||||
kind: 0,
|
||||
tags: vec![],
|
||||
content: "".to_owned(),
|
||||
sig: "0".to_owned(),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn event_creation() {
|
||||
// create an event
|
||||
let event = simple_event();
|
||||
let event = Event::simple_event();
|
||||
assert_eq!(event.id, "0");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn event_serialize() -> Result<()> {
|
||||
// serialize an event to JSON string
|
||||
let event = simple_event();
|
||||
let event = Event::simple_event();
|
||||
let j = serde_json::to_string(&event)?;
|
||||
assert_eq!(j, "{\"id\":\"0\",\"pubkey\":\"0\",\"created_at\":0,\"kind\":0,\"tags\":[],\"content\":\"\",\"sig\":\"0\"}");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_event_tag_match() -> Result<()> {
|
||||
let event = simple_event();
|
||||
assert!(!event.event_tag_match("foo"));
|
||||
Ok(())
|
||||
fn empty_event_tag_match() {
|
||||
let event = Event::simple_event();
|
||||
assert!(!event
|
||||
.generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn single_event_tag_match() -> Result<()> {
|
||||
let mut event = simple_event();
|
||||
fn single_event_tag_match() {
|
||||
let mut event = Event::simple_event();
|
||||
event.tags = vec![vec!["e".to_owned(), "foo".to_owned()]];
|
||||
assert!(event.event_tag_match("foo"));
|
||||
Ok(())
|
||||
event.build_index();
|
||||
assert_eq!(
|
||||
event.generic_tag_val_intersect(
|
||||
'e',
|
||||
&HashSet::from(["foo".to_owned(), "bar".to_owned()])
|
||||
),
|
||||
true
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn event_tags_serialize() -> Result<()> {
|
||||
// serialize an event with tags to JSON string
|
||||
let mut event = simple_event();
|
||||
let mut event = Event::simple_event();
|
||||
event.tags = vec![
|
||||
vec![
|
||||
"e".to_owned(),
|
||||
@@ -242,22 +407,79 @@ mod tests {
|
||||
let e = Event {
|
||||
id: "999".to_owned(),
|
||||
pubkey: "012345".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 501234,
|
||||
kind: 1,
|
||||
tags: vec![],
|
||||
content: "this is a test".to_owned(),
|
||||
sig: "abcde".to_owned(),
|
||||
tagidx: None,
|
||||
};
|
||||
let c = e.to_canonical();
|
||||
let expected = Some(r#"[0,"012345",501234,1,[],"this is a test"]"#.to_owned());
|
||||
assert_eq!(c, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn event_tag_select() {
|
||||
let e = Event {
|
||||
id: "999".to_owned(),
|
||||
pubkey: "012345".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 501234,
|
||||
kind: 1,
|
||||
tags: vec![
|
||||
vec!["j".to_owned(), "abc".to_owned()],
|
||||
vec!["e".to_owned(), "foo".to_owned()],
|
||||
vec!["e".to_owned(), "bar".to_owned()],
|
||||
vec!["e".to_owned(), "baz".to_owned()],
|
||||
vec![
|
||||
"p".to_owned(),
|
||||
"aaaa".to_owned(),
|
||||
"ws://example.com".to_owned(),
|
||||
],
|
||||
],
|
||||
content: "this is a test".to_owned(),
|
||||
sig: "abcde".to_owned(),
|
||||
tagidx: None,
|
||||
};
|
||||
let v = e.tag_values_by_name("e");
|
||||
assert_eq!(v, vec!["foo", "bar", "baz"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn event_no_tag_select() {
|
||||
let e = Event {
|
||||
id: "999".to_owned(),
|
||||
pubkey: "012345".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 501234,
|
||||
kind: 1,
|
||||
tags: vec![
|
||||
vec!["j".to_owned(), "abc".to_owned()],
|
||||
vec!["e".to_owned(), "foo".to_owned()],
|
||||
vec!["e".to_owned(), "baz".to_owned()],
|
||||
vec![
|
||||
"p".to_owned(),
|
||||
"aaaa".to_owned(),
|
||||
"ws://example.com".to_owned(),
|
||||
],
|
||||
],
|
||||
content: "this is a test".to_owned(),
|
||||
sig: "abcde".to_owned(),
|
||||
tagidx: None,
|
||||
};
|
||||
let v = e.tag_values_by_name("x");
|
||||
// asking for tags that don't exist just returns zero-length vector
|
||||
assert_eq!(v.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn event_canonical_with_tags() {
|
||||
let e = Event {
|
||||
id: "999".to_owned(),
|
||||
pubkey: "012345".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 501234,
|
||||
kind: 1,
|
||||
tags: vec![
|
||||
@@ -270,6 +492,7 @@ mod tests {
|
||||
],
|
||||
content: "this is a test".to_owned(),
|
||||
sig: "abcde".to_owned(),
|
||||
tagidx: None,
|
||||
};
|
||||
let c = e.to_canonical();
|
||||
let expected_json = r###"[0,"012345",501234,1,[["#e","aoeu"],["#p","aaaa","ws://example.com"]],"this is a test"]"###;
|
||||
|
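Replacing the e/p-specific helpers in event.rs with generic_tag_val_intersect means filter matching goes through the tagidx map built during validation, so any single-character tag can be matched without rescanning the tag arrays. A sketch of a filter-side check, assuming the event has already passed through the EventCmd conversion so its index is populated:

use std::collections::HashSet;
use nostr_rs_relay::event::Event;

fn references_any_event_id(event: &Event, ids: &[&str]) -> bool {
    let wanted: HashSet<String> = ids.iter().map(|s| (*s).to_owned()).collect();
    // 'e' tags reference other events; 'p' tags would be checked the same way
    event.generic_tag_val_intersect('e', &wanted)
}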
158 src/hexrange.rs (new file)
@@ -0,0 +1,158 @@
|
||||
//! Utilities for searching hexadecimal
|
||||
use crate::utils::is_hex;
|
||||
use hex;
|
||||
|
||||
/// Types of hexadecimal queries.
|
||||
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)]
|
||||
pub enum HexSearch {
|
||||
// when no range is needed, exact 32-byte
|
||||
Exact(Vec<u8>),
|
||||
// lower (inclusive) and upper range (exclusive)
|
||||
Range(Vec<u8>, Vec<u8>),
|
||||
// lower bound only, upper bound is MAX inclusive
|
||||
LowerOnly(Vec<u8>),
|
||||
}
|
||||
|
||||
/// Check if a string contains only f chars
|
||||
fn is_all_fs(s: &str) -> bool {
|
||||
s.chars().all(|x| x == 'f' || x == 'F')
|
||||
}
|
||||
|
||||
/// Find the next hex sequence greater than the argument.
|
||||
pub fn hex_range(s: &str) -> Option<HexSearch> {
|
||||
// handle special cases
|
||||
if !is_hex(s) || s.len() > 64 {
|
||||
return None;
|
||||
}
|
||||
if s.len() == 64 {
|
||||
return Some(HexSearch::Exact(hex::decode(s).ok()?));
|
||||
}
|
||||
// if s is odd, add a zero
|
||||
let mut hash_base = s.to_owned();
|
||||
let mut odd = hash_base.len() % 2 != 0;
|
||||
if odd {
|
||||
// extend the string to make it even
|
||||
hash_base.push('0');
|
||||
}
|
||||
let base = hex::decode(hash_base).ok()?;
|
||||
// check for all ff's
|
||||
if is_all_fs(s) {
|
||||
// there is no higher bound, we only want to search for blobs greater than this.
|
||||
return Some(HexSearch::LowerOnly(base));
|
||||
}
|
||||
|
||||
// return a range
|
||||
let mut upper = base.clone();
|
||||
let mut byte_len = upper.len();
|
||||
|
||||
// for odd strings, we made them longer, but we want to increment the upper char (+16).
|
||||
// we know we can do this without overflowing because we explicitly set the bottom half to 0's.
|
||||
while byte_len > 0 {
|
||||
byte_len -= 1;
|
||||
// check if byte can be incremented, or if we need to carry.
|
||||
let b = upper[byte_len];
|
||||
if b == u8::MAX {
|
||||
// reset and carry
|
||||
upper[byte_len] = 0;
|
||||
} else if odd {
|
||||
// check if first char in this byte is NOT 'f'
|
||||
if b < 240 {
|
||||
upper[byte_len] = b + 16; // bump up the first character in this byte
|
||||
// increment done, stop iterating through the vec
|
||||
break;
|
||||
}
|
||||
// if it is 'f', reset the byte to 0 and do a carry
|
||||
// reset and carry
|
||||
upper[byte_len] = 0;
|
||||
// done with odd logic, so don't repeat this
|
||||
odd = false;
|
||||
} else {
|
||||
// bump up the first character in this byte
|
||||
upper[byte_len] = b + 1;
|
||||
// increment done, stop iterating
|
||||
break;
|
||||
}
|
||||
}
|
||||
Some(HexSearch::Range(base, upper))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::error::Result;
|
||||
|
||||
#[test]
|
||||
fn hex_range_exact() -> Result<()> {
|
||||
let hex = "abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00";
|
||||
let r = hex_range(hex);
|
||||
assert_eq!(
|
||||
r,
|
||||
Some(HexSearch::Exact(hex::decode(hex).expect("invalid hex")))
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
#[test]
|
||||
fn hex_full_range() -> Result<()> {
|
||||
let hex = "aaaa";
|
||||
let hex_upper = "aaab";
|
||||
let r = hex_range(hex);
|
||||
assert_eq!(
|
||||
r,
|
||||
Some(HexSearch::Range(
|
||||
hex::decode(hex).expect("invalid hex"),
|
||||
hex::decode(hex_upper).expect("invalid hex")
|
||||
))
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hex_full_range_odd() -> Result<()> {
|
||||
let r = hex_range("abc");
|
||||
assert_eq!(
|
||||
r,
|
||||
Some(HexSearch::Range(
|
||||
hex::decode("abc0").expect("invalid hex"),
|
||||
hex::decode("abd0").expect("invalid hex")
|
||||
))
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hex_full_range_odd_end_f() -> Result<()> {
|
||||
let r = hex_range("abf");
|
||||
assert_eq!(
|
||||
r,
|
||||
Some(HexSearch::Range(
|
||||
hex::decode("abf0").expect("invalid hex"),
|
||||
hex::decode("ac00").expect("invalid hex")
|
||||
))
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hex_no_upper() -> Result<()> {
|
||||
let r = hex_range("ffff");
|
||||
assert_eq!(
|
||||
r,
|
||||
Some(HexSearch::LowerOnly(
|
||||
hex::decode("ffff").expect("invalid hex")
|
||||
))
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hex_no_upper_odd() -> Result<()> {
|
||||
let r = hex_range("fff");
|
||||
assert_eq!(
|
||||
r,
|
||||
Some(HexSearch::LowerOnly(
|
||||
hex::decode("fff0").expect("invalid hex")
|
||||
))
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
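hex_range above exists so that hex prefixes from filters can be turned into blob comparisons: a full 64-character string is an exact match, an all-f prefix has no upper bound, and anything else becomes a half-open [lower, upper) range. A sketch of how a query builder might branch on the result (the column name and SQL fragments are illustrative, not taken from this diff):

use nostr_rs_relay::hexrange::{hex_range, HexSearch};

fn id_prefix_clause(prefix: &str) -> Option<(String, Vec<Vec<u8>>)> {
    match hex_range(prefix)? {
        HexSearch::Exact(exact) => Some(("event_hash = ?".to_owned(), vec![exact])),
        HexSearch::Range(lower, upper) => Some((
            "event_hash >= ? AND event_hash < ?".to_owned(),
            vec![lower, upper],
        )),
        HexSearch::LowerOnly(lower) => Some(("event_hash >= ?".to_owned(), vec![lower])),
    }
}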
43 src/info.rs (new file)
@@ -0,0 +1,43 @@
|
||||
//! Relay metadata using NIP-11
|
||||
/// Relay Info
|
||||
use crate::config;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
pub const CARGO_PKG_VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[allow(unused)]
|
||||
pub struct RelayInfo {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub id: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub name: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub description: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub pubkey: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub contact: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub supported_nips: Option<Vec<i64>>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub software: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub version: Option<String>,
|
||||
}
|
||||
|
||||
/// Convert an Info configuration into public Relay Info
|
||||
impl From<config::Info> for RelayInfo {
|
||||
fn from(i: config::Info) -> Self {
|
||||
RelayInfo {
|
||||
id: i.relay_url,
|
||||
name: i.name,
|
||||
description: i.description,
|
||||
pubkey: i.pubkey,
|
||||
contact: i.contact,
|
||||
supported_nips: Some(vec![1, 2, 9, 11, 12, 15, 16, 20, 22, 26]),
|
||||
software: Some("https://git.sr.ht/~gheartsfield/nostr-rs-relay".to_owned()),
|
||||
version: CARGO_PKG_VERSION.map(|x| x.to_owned()),
|
||||
}
|
||||
}
|
||||
}
|
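RelayInfo above is the NIP-11 document a relay returns for an application/nostr+json request, and the From impl fills it from the info section of the configuration. A sketch of producing that JSON, assuming config::Info is the struct the impl above consumes:

use nostr_rs_relay::config;
use nostr_rs_relay::info::RelayInfo;

fn relay_info_json(info: config::Info) -> serde_json::Result<String> {
    let relay_info: RelayInfo = info.into();
    // None fields are skipped during serialization, so unset options simply disappear
    serde_json::to_string_pretty(&relay_info)
}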
11 src/lib.rs
@@ -1,7 +1,16 @@
|
||||
pub mod close;
|
||||
pub mod config;
|
||||
pub mod conn;
|
||||
pub mod db;
|
||||
pub mod delegation;
|
||||
pub mod error;
|
||||
pub mod event;
|
||||
pub mod protostream;
|
||||
pub mod hexrange;
|
||||
pub mod info;
|
||||
pub mod nip05;
|
||||
pub mod notice;
|
||||
pub mod schema;
|
||||
pub mod subscription;
|
||||
pub mod utils;
|
||||
// Public API for creating relays programmatically
|
||||
pub mod server;
|
||||
|
272 src/main.rs
@@ -1,233 +1,51 @@
|
||||
//! Server process
|
||||
use futures::SinkExt;
|
||||
use futures::StreamExt;
|
||||
use log::*;
|
||||
use nostr_rs_relay::close::Close;
|
||||
use nostr_rs_relay::conn;
|
||||
use nostr_rs_relay::db;
|
||||
use nostr_rs_relay::error::{Error, Result};
|
||||
use nostr_rs_relay::event::Event;
|
||||
use nostr_rs_relay::protostream;
|
||||
use nostr_rs_relay::protostream::NostrMessage::*;
|
||||
use nostr_rs_relay::protostream::NostrResponse::*;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use nostr_rs_relay::config;
|
||||
use nostr_rs_relay::server::start_server;
|
||||
use std::env;
|
||||
use tokio::net::{TcpListener, TcpStream};
|
||||
use tokio::runtime::Builder;
|
||||
use tokio::sync::broadcast;
|
||||
use tokio::sync::broadcast::{Receiver, Sender};
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::sync::oneshot;
|
||||
use std::sync::mpsc as syncmpsc;
|
||||
use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
|
||||
use std::thread;
|
||||
use tracing::info;
|
||||
|
||||
use console_subscriber::ConsoleLayer;
|
||||
|
||||
/// Return a requested DB name from command line arguments.
|
||||
fn db_from_args(args: &[String]) -> Option<String> {
|
||||
if args.len() == 3 && args.get(1) == Some(&"--db".to_owned()) {
|
||||
return args.get(2).map(std::clone::Clone::clone);
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Start running a Nostr relay server.
|
||||
fn main() -> Result<(), Error> {
|
||||
// setup logger and environment
|
||||
let _ = env_logger::try_init();
|
||||
let addr = env::args()
|
||||
.nth(1)
|
||||
.unwrap_or_else(|| "0.0.0.0:8080".to_string());
|
||||
// configure tokio runtime
|
||||
let rt = Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.thread_name("tokio-ws")
|
||||
.build()
|
||||
.unwrap();
|
||||
// start tokio
|
||||
rt.block_on(async {
|
||||
let listener = TcpListener::bind(&addr).await.expect("Failed to bind");
|
||||
info!("listening on: {}", addr);
|
||||
// all client-submitted valid events are broadcast to every
|
||||
// other client on this channel. This should be large enough
|
||||
// to accommodate slower readers (messages are dropped if
|
||||
// clients can not keep up).
|
||||
let (bcast_tx, _) = broadcast::channel::<Event>(4096);
|
||||
// validated events that need to be persisted are sent to the
|
||||
// database on via this channel.
|
||||
let (event_tx, event_rx) = mpsc::channel::<Event>(16);
|
||||
// start the database writer thread. Give it a channel for
|
||||
// writing events, and for publishing events that have been
|
||||
// written (to all connected clients).
|
||||
db::db_writer(event_rx, bcast_tx.clone()).await;
|
||||
// establish a channel for letting all threads know about a
|
||||
// requested server shutdown.
|
||||
let (invoke_shutdown, _) = broadcast::channel::<()>(1);
|
||||
let ctrl_c_shutdown = invoke_shutdown.clone();
|
||||
// listen for ctrl-c interrupts
|
||||
tokio::spawn(async move {
|
||||
tokio::signal::ctrl_c().await.unwrap();
|
||||
info!("shutting down due to SIGINT");
|
||||
ctrl_c_shutdown.send(()).ok();
|
||||
});
|
||||
// track unique client connection count
|
||||
let mut client_accept_count: usize = 0;
|
||||
let mut stop_listening = invoke_shutdown.subscribe();
|
||||
// handle new client connection requests, or SIGINT signals.
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = stop_listening.recv() => {
|
||||
break;
|
||||
}
|
||||
Ok((stream, _)) = listener.accept() => {
|
||||
client_accept_count += 1;
|
||||
info!("creating new connection for client #{}",client_accept_count);
|
||||
tokio::spawn(nostr_server(
|
||||
stream,
|
||||
bcast_tx.clone(),
|
||||
event_tx.clone(),
|
||||
invoke_shutdown.subscribe(),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
fn main() {
|
||||
// setup tracing
|
||||
let _trace_sub = tracing_subscriber::fmt::try_init();
|
||||
info!("Starting up from main");
|
||||
// get database directory from args
|
||||
let args: Vec<String> = env::args().collect();
|
||||
let db_dir: Option<String> = db_from_args(&args);
|
||||
// configure settings from config.toml
|
||||
// replace default settings with those read from config.toml
|
||||
let mut settings = config::Settings::new();
|
||||
|
||||
if settings.diagnostics.tracing {
|
||||
// enable tracing with tokio-console
|
||||
ConsoleLayer::builder().with_default_env().init();
|
||||
}
|
||||
// update with database location
|
||||
if let Some(db) = db_dir {
|
||||
settings.database.data_directory = db;
|
||||
}
|
||||
|
||||
let (_, ctrl_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
|
||||
// run this in a new thread
|
||||
let handle = thread::spawn(|| {
|
||||
// we should have a 'control plane' channel to monitor and bump the server.
|
||||
// this will let us do stuff like clear the database, shutdown, etc.
|
||||
let _svr = start_server(settings, ctrl_rx);
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Handle new client connections. This runs through an event loop
|
||||
/// for all client communication.
|
||||
async fn nostr_server(
|
||||
stream: TcpStream,
|
||||
broadcast: Sender<Event>,
|
||||
event_tx: tokio::sync::mpsc::Sender<Event>,
|
||||
mut shutdown: Receiver<()>,
|
||||
) {
|
||||
// get a broadcast channel for clients to communicate on
|
||||
let mut bcast_rx = broadcast.subscribe();
|
||||
// upgrade the TCP connection to WebSocket
|
||||
let conn = tokio_tungstenite::accept_async(stream).await;
|
||||
let ws_stream = conn.expect("websocket handshake error");
|
||||
// wrap websocket into a stream & sink of Nostr protocol messages
|
||||
let mut nostr_stream = protostream::wrap_ws_in_nostr(ws_stream);
|
||||
// Track internal client state
|
||||
let mut conn = conn::ClientConn::new();
|
||||
let cid = conn.get_client_prefix();
|
||||
// Create a channel for receiving query results from the database.
|
||||
// we will send out the tx handle to any query we generate.
|
||||
let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(256);
|
||||
// maintain a hashmap of a oneshot channel for active subscriptions.
|
||||
// when these subscriptions are cancelled, make a message
|
||||
// available to the executing query so it knows to stop.
|
||||
//let (abandon_query_tx, _) = oneshot::channel::<()>();
|
||||
let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
|
||||
// for stats, keep track of how many events the client published,
|
||||
// and how many it received from queries.
|
||||
let mut client_published_event_count: usize = 0;
|
||||
let mut client_received_event_count: usize = 0;
|
||||
info!("new connection for client: {}", cid);
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = shutdown.recv() => {
|
||||
// server shutting down, exit loop
|
||||
break;
|
||||
},
|
||||
Some(query_result) = query_rx.recv() => {
|
||||
// database informed us of a query result we asked for
|
||||
let res = EventRes(query_result.sub_id,query_result.event);
|
||||
client_received_event_count += 1;
|
||||
nostr_stream.send(res).await.ok();
|
||||
},
|
||||
Ok(global_event) = bcast_rx.recv() => {
|
||||
// an event has been broadcast to all clients
|
||||
// first check if there is a subscription for this event.
|
||||
let matching_subs = conn.get_matching_subscriptions(&global_event);
|
||||
for s in matching_subs {
|
||||
// TODO: serialize at broadcast time, instead of
|
||||
// once for each consumer.
|
||||
if let Ok(event_str) = serde_json::to_string(&global_event) {
|
||||
debug!("sub match: client: {}, sub: {}, event: {}",
|
||||
cid, s,
|
||||
global_event.get_event_id_prefix());
|
||||
// create an event response and send it
|
||||
let res = EventRes(s.to_owned(),event_str);
|
||||
nostr_stream.send(res).await.ok();
|
||||
} else {
|
||||
warn!("could not convert event to string");
|
||||
}
|
||||
}
|
||||
},
|
||||
// check if this client has a subscription
|
||||
proto_next = nostr_stream.next() => {
|
||||
match proto_next {
|
||||
Some(Ok(EventMsg(ec))) => {
|
||||
// An EventCmd needs to be validated to be converted into an Event
|
||||
// handle each type of message
|
||||
let parsed : Result<Event> = Result::<Event>::from(ec);
|
||||
match parsed {
|
||||
Ok(e) => {
|
||||
let id_prefix:String = e.id.chars().take(8).collect();
|
||||
debug!("successfully parsed/validated event: {} from client: {}", id_prefix, cid);
|
||||
// Write this to the database
|
||||
event_tx.send(e.clone()).await.ok();
|
||||
client_published_event_count += 1;
|
||||
},
|
||||
Err(_) => {
|
||||
info!("client {} sent an invalid event", cid);
|
||||
nostr_stream.send(NoticeRes("event was invalid".to_owned())).await.ok();
|
||||
}
|
||||
}
|
||||
},
|
||||
Some(Ok(SubMsg(s))) => {
|
||||
debug!("client {} requesting a subscription", cid);
|
||||
// subscription handling consists of:
|
||||
// * registering the subscription so future events can be matched
|
||||
// * making a channel to cancel to request later
|
||||
// * sending a request for a SQL query
|
||||
let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
|
||||
match conn.subscribe(s.clone()) {
|
||||
Ok(()) => {
|
||||
running_queries.insert(s.id.to_owned(), abandon_query_tx);
|
||||
// start a database query
|
||||
db::db_query(s, query_tx.clone(), abandon_query_rx).await;
|
||||
},
|
||||
Err(e) => {
|
||||
info!("Subscription error: {}", e);
|
||||
nostr_stream.send(NoticeRes(format!("{}",e))).await.ok();
|
||||
|
||||
}
|
||||
}
|
||||
},
|
||||
Some(Ok(CloseMsg(cc))) => {
|
||||
// closing a request simply removes the subscription.
|
||||
let parsed : Result<Close> = Result::<Close>::from(cc);
|
||||
match parsed {
|
||||
Ok(c) => {
|
||||
// check if a query is currently
|
||||
// running, and remove it if so.
|
||||
let stop_tx = running_queries.remove(&c.id);
|
||||
if let Some(tx) = stop_tx {
|
||||
tx.send(()).ok();
|
||||
}
|
||||
// stop checking new events against
|
||||
// the subscription
|
||||
conn.unsubscribe(c);
|
||||
},
|
||||
Err(_) => {
|
||||
info!("invalid command ignored");
|
||||
|
||||
}
|
||||
}
|
||||
},
|
||||
None => {
|
||||
debug!("normal websocket close from client: {}",cid);
|
||||
break;
|
||||
},
|
||||
Some(Err(Error::ConnError)) => {
|
||||
debug!("got connection close/error, disconnecting client: {}",cid);
|
||||
break;
|
||||
}
|
||||
Some(Err(e)) => {
|
||||
info!("got non-fatal error from client: {}, error: {:?}", cid, e);
|
||||
},
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
// connection cleanup - ensure any still running queries are terminated.
|
||||
for (_, stop_tx) in running_queries.into_iter() {
|
||||
stop_tx.send(()).ok();
|
||||
}
|
||||
info!(
|
||||
"stopping connection for client: {} (client sent {} event(s), received {})",
|
||||
cid, client_published_event_count, client_received_event_count
|
||||
);
|
||||
// block on nostr thread to finish.
|
||||
handle.join().unwrap();
|
||||
}
|
||||
|
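With main.rs reduced to configuration plus a thread spawn, the same entry point is usable by embedders through the server module exported in lib.rs. A sketch of an embedding that mirrors the new main above; the database path is illustrative and the control-plane channel is unused for now, just as in main:

use nostr_rs_relay::config;
use nostr_rs_relay::server::start_server;
use std::sync::mpsc;
use std::thread;

fn run_embedded_relay() {
    let mut settings = config::Settings::new();
    settings.database.data_directory = "/tmp/nostr-db".to_owned();
    let (_ctrl_tx, ctrl_rx) = mpsc::channel::<()>();
    let handle = thread::spawn(move || {
        // blocks until the relay shuts down
        let _ = start_server(settings, ctrl_rx);
    });
    handle.join().unwrap();
}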
824 src/nip05.rs (new file)
@@ -0,0 +1,824 @@
|
||||
//! User verification using NIP-05 names
|
||||
//!
|
||||
//! NIP-05 defines a mechanism for authors to associate an internet
|
||||
//! address with their public key, in metadata events. This module
|
||||
//! consumes a stream of metadata events, and keeps a database table
|
||||
//! updated with the current NIP-05 verification status.
|
||||
use crate::config::VerifiedUsers;
|
||||
use crate::db;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::event::Event;
|
||||
use crate::utils::unix_time;
|
||||
use hyper::body::HttpBody;
|
||||
use hyper::client::connect::HttpConnector;
|
||||
use hyper::Client;
|
||||
use hyper_tls::HttpsConnector;
|
||||
use rand::Rng;
|
||||
use rusqlite::params;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
use std::time::SystemTime;
|
||||
use tokio::time::Interval;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
/// NIP-05 verifier state
|
||||
pub struct Verifier {
|
||||
/// Metadata events for us to inspect
|
||||
metadata_rx: tokio::sync::broadcast::Receiver<Event>,
|
||||
/// Newly validated events get written and then broadcast on this channel to subscribers
|
||||
event_tx: tokio::sync::broadcast::Sender<Event>,
|
||||
/// SQLite read query pool
|
||||
read_pool: db::SqlitePool,
|
||||
/// SQLite write query pool
|
||||
write_pool: db::SqlitePool,
|
||||
/// Settings
|
||||
settings: crate::config::Settings,
|
||||
/// HTTP client
|
||||
client: hyper::Client<HttpsConnector<HttpConnector>, hyper::Body>,
|
||||
/// After all accounts are updated, wait this long before checking again.
|
||||
wait_after_finish: Duration,
|
||||
/// Minimum amount of time between HTTP queries
|
||||
http_wait_duration: Duration,
|
||||
/// Interval for updating verification records
|
||||
reverify_interval: Interval,
|
||||
}
|
||||
|
||||
/// A NIP-05 identifier is a local part and domain.
|
||||
#[derive(PartialEq, Eq, Debug, Clone)]
|
||||
pub struct Nip05Name {
|
||||
local: String,
|
||||
domain: String,
|
||||
}
|
||||
|
||||
impl Nip05Name {
|
||||
/// Does this name represent the entire domain?
|
||||
pub fn is_domain_only(&self) -> bool {
|
||||
self.local == "_"
|
||||
}
|
||||
|
||||
/// Determine the URL to query for verification
|
||||
fn to_url(&self) -> Option<http::Uri> {
|
||||
format!(
|
||||
"https://{}/.well-known/nostr.json?name={}",
|
||||
self.domain, self.local
|
||||
)
|
||||
.parse::<http::Uri>()
|
||||
.ok()
|
||||
}
|
||||
}
|
||||
|
||||
// Parsing Nip05Names from strings
|
||||
impl std::convert::TryFrom<&str> for Nip05Name {
|
||||
type Error = Error;
|
||||
fn try_from(inet: &str) -> Result<Self, Self::Error> {
|
||||
// break full name at the @ boundary.
|
||||
let components: Vec<&str> = inet.split('@').collect();
|
||||
if components.len() != 2 {
|
||||
Err(Error::CustomError("too many/few components".to_owned()))
|
||||
} else {
|
||||
// check if local name is valid
|
||||
let local = components[0];
|
||||
let domain = components[1];
|
||||
if local
|
||||
.chars()
|
||||
.all(|x| x.is_alphanumeric() || x == '_' || x == '-' || x == '.')
|
||||
{
|
||||
if domain
|
||||
.chars()
|
||||
.all(|x| x.is_alphanumeric() || x == '-' || x == '.')
|
||||
{
|
||||
Ok(Nip05Name {
|
||||
local: local.to_owned(),
|
||||
domain: domain.to_owned(),
|
||||
})
|
||||
} else {
|
||||
Err(Error::CustomError(
|
||||
"invalid character in domain part".to_owned(),
|
||||
))
|
||||
}
|
||||
} else {
|
||||
Err(Error::CustomError(
|
||||
"invalid character in local part".to_owned(),
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Nip05Name {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}@{}", self.local, self.domain)
|
||||
}
|
||||
}
|
||||
|
||||
// Current time, with a slight forward jitter in seconds
|
||||
fn now_jitter(sec: u64) -> u64 {
|
||||
// random time between now and `sec` seconds in the future.
|
||||
let mut rng = rand::thread_rng();
|
||||
let jitter_amount = rng.gen_range(0..sec);
|
||||
let now = unix_time();
|
||||
now.saturating_add(jitter_amount)
|
||||
}
|
||||
|
||||
/// Check if the specified username and address are present and match in this response body
|
||||
fn body_contains_user(username: &str, address: &str, bytes: hyper::body::Bytes) -> Result<bool> {
|
||||
// convert the body into json
|
||||
let body: serde_json::Value = serde_json::from_slice(&bytes)?;
|
||||
// ensure we have a names object.
|
||||
let names_map = body
|
||||
.as_object()
|
||||
.and_then(|x| x.get("names"))
|
||||
.and_then(|x| x.as_object())
|
||||
.ok_or_else(|| Error::CustomError("not a map".to_owned()))?;
|
||||
// get the pubkey for the requested user
|
||||
let check_name = names_map.get(username).and_then(|x| x.as_str());
|
||||
// ensure the address is a match
|
||||
Ok(check_name.map(|x| x == address).unwrap_or(false))
|
||||
}
|
||||
|
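// Illustrative sketch: the document shape body_contains_user expects is the
// NIP-05 "names" map keyed by local part. The name and key below are
// placeholders; real entries map to 64-character hex pubkeys.
fn example_body_check() -> Result<bool> {
    let body = hyper::body::Bytes::from(r#"{"names":{"alice":"deadbeef"}}"#);
    body_contains_user("alice", "deadbeef", body)
}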
||||
impl Verifier {
|
||||
pub fn new(
|
||||
metadata_rx: tokio::sync::broadcast::Receiver<Event>,
|
||||
event_tx: tokio::sync::broadcast::Sender<Event>,
|
||||
settings: crate::config::Settings,
|
||||
) -> Result<Self> {
|
||||
info!("creating NIP-05 verifier");
|
||||
// build a database connection for reading and writing.
|
||||
let write_pool = db::build_pool(
|
||||
"nip05 writer",
|
||||
&settings,
|
||||
rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE,
|
||||
1, // min conns
|
||||
4, // max conns
|
||||
true, // wait for DB
|
||||
);
|
||||
let read_pool = db::build_pool(
|
||||
"nip05 reader",
|
||||
&settings,
|
||||
rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY,
|
||||
1, // min conns
|
||||
8, // max conns
|
||||
true, // wait for DB
|
||||
);
|
||||
// setup hyper client
|
||||
let https = HttpsConnector::new();
|
||||
let client = Client::builder().build::<_, hyper::Body>(https);
|
||||
|
||||
// After all accounts have been re-verified, don't check again
|
||||
// for this long.
|
||||
let wait_after_finish = Duration::from_secs(60 * 10);
|
||||
// when we have an active queue of accounts to validate, we
|
||||
// will wait this duration between HTTP requests.
|
||||
let http_wait_duration = Duration::from_secs(1);
|
||||
// setup initial interval for re-verification. If we find
|
||||
// there is no work to be done, it will be reset to a longer
|
||||
// duration.
|
||||
let reverify_interval = tokio::time::interval(http_wait_duration);
|
||||
Ok(Verifier {
|
||||
metadata_rx,
|
||||
event_tx,
|
||||
read_pool,
|
||||
write_pool,
|
||||
settings,
|
||||
client,
|
||||
wait_after_finish,
|
||||
http_wait_duration,
|
||||
reverify_interval,
|
||||
})
|
||||
}
|
||||
|
||||
/// Perform web verification against a NIP-05 name and address.
|
||||
pub async fn get_web_verification(
|
||||
&mut self,
|
||||
nip: &Nip05Name,
|
||||
pubkey: &str,
|
||||
) -> UserWebVerificationStatus {
|
||||
self.get_web_verification_res(nip, pubkey)
|
||||
.await
|
||||
.unwrap_or(UserWebVerificationStatus::Unknown)
|
||||
}
|
||||
|
||||
/// Perform web verification against an `Event` (must be metadata).
|
||||
pub async fn get_web_verification_from_event(
|
||||
&mut self,
|
||||
e: &Event,
|
||||
) -> UserWebVerificationStatus {
|
||||
let nip_parse = e.get_nip05_addr();
|
||||
if let Some(nip) = nip_parse {
|
||||
self.get_web_verification_res(&nip, &e.pubkey)
|
||||
.await
|
||||
.unwrap_or(UserWebVerificationStatus::Unknown)
|
||||
} else {
|
||||
UserWebVerificationStatus::Unknown
|
||||
}
|
||||
}
|
||||
|
||||
/// Perform web verification, with a `Result` return.
|
||||
async fn get_web_verification_res(
|
||||
&mut self,
|
||||
nip: &Nip05Name,
|
||||
pubkey: &str,
|
||||
) -> Result<UserWebVerificationStatus> {
|
||||
// determine if this domain should be checked
|
||||
if !is_domain_allowed(
|
||||
&nip.domain,
|
||||
&self.settings.verified_users.domain_whitelist,
|
||||
&self.settings.verified_users.domain_blacklist,
|
||||
) {
|
||||
return Ok(UserWebVerificationStatus::DomainNotAllowed);
|
||||
}
|
||||
let url = nip
|
||||
.to_url()
|
||||
.ok_or_else(|| Error::CustomError("invalid NIP-05 URL".to_owned()))?;
|
||||
let req = hyper::Request::builder()
|
||||
.method(hyper::Method::GET)
|
||||
.uri(url)
|
||||
.header("Accept", "application/json")
|
||||
.header(
|
||||
"User-Agent",
|
||||
format!(
|
||||
"nostr-rs-relay/{} NIP-05 Verifier",
|
||||
crate::info::CARGO_PKG_VERSION.unwrap()
|
||||
),
|
||||
)
|
||||
.body(hyper::Body::empty())
|
||||
.expect("request builder");
|
||||
|
||||
let response_fut = self.client.request(req);
|
||||
|
||||
// HTTP request with timeout
|
||||
match tokio::time::timeout(Duration::from_secs(5), response_fut).await {
|
||||
Ok(response_res) => {
|
||||
// limit size of verification document to 1MB.
|
||||
const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024 * 1024;
|
||||
let response = response_res?;
|
||||
// determine content length from response
|
||||
let response_content_length = match response.body().size_hint().upper() {
|
||||
Some(v) => v,
|
||||
None => MAX_ALLOWED_RESPONSE_SIZE + 1, // reject missing content length
|
||||
};
|
||||
// TODO: test how hyper handles the server providing an inaccurate content-length.
|
||||
if response_content_length <= MAX_ALLOWED_RESPONSE_SIZE {
|
||||
let (parts, body) = response.into_parts();
|
||||
// TODO: consider redirects
|
||||
if parts.status == http::StatusCode::OK {
|
||||
// parse body, determine if the username / key / address is present
|
||||
let body_bytes = hyper::body::to_bytes(body).await?;
|
||||
let body_matches = body_contains_user(&nip.local, pubkey, body_bytes)?;
|
||||
if body_matches {
|
||||
return Ok(UserWebVerificationStatus::Verified);
|
||||
}
|
||||
// successful response, parsed as a nip-05
|
||||
// document, but this name/pubkey was not
|
||||
// present.
|
||||
return Ok(UserWebVerificationStatus::Unverified);
|
||||
}
|
||||
} else {
|
||||
info!(
|
||||
"content length missing or exceeded limits for account: {:?}",
|
||||
nip.to_string()
|
||||
);
|
||||
}
|
||||
}
|
||||
Err(_) => {
|
||||
info!("timeout verifying account {:?}", nip);
|
||||
return Ok(UserWebVerificationStatus::Unknown);
|
||||
}
|
||||
}
|
||||
Ok(UserWebVerificationStatus::Unknown)
|
||||
}
|
||||
|
||||
/// Perform NIP-05 verifier tasks.
|
||||
pub async fn run(&mut self) {
|
||||
// use this to schedule periodic re-validation tasks
|
||||
// run a loop, restarting on failure
|
||||
loop {
|
||||
let res = self.run_internal().await;
|
||||
if let Err(e) = res {
|
||||
info!("error in verifier: {:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Internal select loop for performing verification
|
||||
async fn run_internal(&mut self) -> Result<()> {
|
||||
tokio::select! {
|
||||
m = self.metadata_rx.recv() => {
|
||||
match m {
|
||||
Ok(e) => {
|
||||
if let Some(naddr) = e.get_nip05_addr() {
|
||||
info!("got metadata event for ({:?},{:?})", naddr.to_string() ,e.get_author_prefix());
|
||||
// Process a new author, checking if they are verified:
|
||||
let check_verified = get_latest_user_verification(self.read_pool.get().expect("could not get connection"), &e.pubkey).await;
|
||||
// ensure the event we got is more recent than the one we have, otherwise we can ignore it.
|
||||
if let Ok(last_check) = check_verified {
|
||||
if e.created_at <= last_check.event_created {
|
||||
// this metadata is from the same author as an existing verification.
|
||||
// it is older than what we have, so we can ignore it.
|
||||
debug!("received older metadata event for author {:?}", e.get_author_prefix());
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
// old, or no existing record for this user. In either case, we just create a new one.
|
||||
let start = Instant::now();
|
||||
let v = self.get_web_verification_from_event(&e).await;
|
||||
info!(
|
||||
"checked name {:?}, result: {:?}, in: {:?}",
|
||||
naddr.to_string(),
|
||||
v,
|
||||
start.elapsed()
|
||||
);
|
||||
// sleep to limit how frequently we make HTTP requests for new metadata events. This should limit us to 4 req/sec.
|
||||
tokio::time::sleep(Duration::from_millis(250)).await;
|
||||
// if this user was verified, we need to write the
|
||||
// record, persist the event, and broadcast.
|
||||
if let UserWebVerificationStatus::Verified = v {
|
||||
self.create_new_verified_user(&naddr.to_string(), &e).await?;
|
||||
}
|
||||
}
|
||||
},
|
||||
Err(tokio::sync::broadcast::error::RecvError::Lagged(c)) => {
|
||||
warn!("incoming metadata events overwhelmed buffer, {} events dropped",c);
|
||||
}
|
||||
Err(tokio::sync::broadcast::error::RecvError::Closed) => {
|
||||
info!("metadata broadcast channel closed");
|
||||
}
|
||||
}
|
||||
},
|
||||
_ = self.reverify_interval.tick() => {
|
||||
// check and see if there is an old account that needs
|
||||
// to be reverified
|
||||
self.do_reverify().await?;
|
||||
},
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Reverify the oldest user verification record.
|
||||
async fn do_reverify(&mut self) -> Result<()> {
|
||||
let reverify_setting = self
|
||||
.settings
|
||||
.verified_users
|
||||
.verify_update_frequency_duration;
|
||||
let max_failures = self.settings.verified_users.max_consecutive_failures;
|
||||
// get from settings, but default to 6hrs between re-checking an account
|
||||
let reverify_dur = reverify_setting.unwrap_or_else(|| Duration::from_secs(60 * 60 * 6));
|
||||
// find all verification records that have success or failure OLDER than the reverify_dur.
|
||||
let now = SystemTime::now();
|
||||
let earliest = now - reverify_dur;
|
||||
let earliest_epoch = earliest
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
.map(|x| x.as_secs())
|
||||
.unwrap_or(0);
|
||||
let vr = get_oldest_user_verification(self.read_pool.get()?, earliest_epoch).await;
|
||||
match vr {
|
||||
Ok(ref v) => {
|
||||
let new_status = self.get_web_verification(&v.name, &v.address).await;
|
||||
match new_status {
|
||||
UserWebVerificationStatus::Verified => {
|
||||
// freshly verified account, update the
|
||||
// timestamp.
|
||||
self.update_verification_record(self.write_pool.get()?, v)
|
||||
.await?;
|
||||
}
|
||||
UserWebVerificationStatus::DomainNotAllowed
|
||||
| UserWebVerificationStatus::Unknown => {
|
||||
// server may be offline, or temporarily
|
||||
// blocked by the config file. Note the
|
||||
// failure so we can process something
|
||||
// else.
|
||||
|
||||
// have we had enough failures to give up?
|
||||
if v.failure_count >= max_failures as u64 {
|
||||
info!(
|
||||
"giving up on verifying {:?} after {} failures",
|
||||
v.name, v.failure_count
|
||||
);
|
||||
self.delete_verification_record(self.write_pool.get()?, v)
|
||||
.await?;
|
||||
} else {
|
||||
// record normal failure, incrementing failure count
|
||||
self.fail_verification_record(self.write_pool.get()?, v)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
UserWebVerificationStatus::Unverified => {
|
||||
// domain has removed the verification, drop
|
||||
// the record on our side.
|
||||
self.delete_verification_record(self.write_pool.get()?, v)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(Error::SqlError(rusqlite::Error::QueryReturnedNoRows)) => {
|
||||
// No users need verification. Reset the interval to
|
||||
// the next verification attempt.
|
||||
let start = tokio::time::Instant::now() + self.wait_after_finish;
|
||||
self.reverify_interval = tokio::time::interval_at(start, self.http_wait_duration);
|
||||
}
|
||||
Err(ref e) => {
|
||||
warn!(
|
||||
"Error when checking for NIP-05 verification records: {:?}",
|
||||
e
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Reset the verification timestamp on a VerificationRecord
|
||||
pub async fn update_verification_record(
|
||||
&mut self,
|
||||
mut conn: db::PooledConnection,
|
||||
vr: &VerificationRecord,
|
||||
) -> Result<()> {
|
||||
let vr_id = vr.rowid;
|
||||
let vr_str = vr.to_string();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
// add some jitter to the verification to prevent everything from stacking up together.
|
||||
let verif_time = now_jitter(600);
|
||||
let tx = conn.transaction()?;
|
||||
{
|
||||
// update verification time and reset any failure count
|
||||
let query =
|
||||
"UPDATE user_verification SET verified_at=?, failure_count=0 WHERE id=?";
|
||||
let mut stmt = tx.prepare(query)?;
|
||||
stmt.execute(params![verif_time, vr_id])?;
|
||||
}
|
||||
tx.commit()?;
|
||||
info!("verification updated for {}", vr_str);
|
||||
let ok: Result<()> = Ok(());
|
||||
ok
|
||||
})
|
||||
.await?
|
||||
}
|
||||
/// Reset the failure timestamp on a VerificationRecord
|
||||
pub async fn fail_verification_record(
|
||||
&mut self,
|
||||
mut conn: db::PooledConnection,
|
||||
vr: &VerificationRecord,
|
||||
) -> Result<()> {
|
||||
let vr_id = vr.rowid;
|
||||
let vr_str = vr.to_string();
|
||||
let fail_count = vr.failure_count.saturating_add(1);
|
||||
tokio::task::spawn_blocking(move || {
|
||||
// add some jitter to the verification to prevent everything from stacking up together.
|
||||
let fail_time = now_jitter(600);
|
||||
let tx = conn.transaction()?;
|
||||
{
|
||||
let query = "UPDATE user_verification SET failed_at=?, failure_count=? WHERE id=?";
|
||||
let mut stmt = tx.prepare(query)?;
|
||||
stmt.execute(params![fail_time, fail_count, vr_id])?;
|
||||
}
|
||||
tx.commit()?;
|
||||
info!("verification failed for {}", vr_str);
|
||||
let ok: Result<()> = Ok(());
|
||||
ok
|
||||
})
|
||||
.await?
|
||||
}
|
||||
/// Delete a VerificationRecord that is no longer valid
|
||||
pub async fn delete_verification_record(
|
||||
&mut self,
|
||||
mut conn: db::PooledConnection,
|
||||
vr: &VerificationRecord,
|
||||
) -> Result<()> {
|
||||
let vr_id = vr.rowid;
|
||||
let vr_str = vr.to_string();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let tx = conn.transaction()?;
|
||||
{
|
||||
let query = "DELETE FROM user_verification WHERE id=?;";
|
||||
let mut stmt = tx.prepare(query)?;
|
||||
stmt.execute(params![vr_id])?;
|
||||
}
|
||||
tx.commit()?;
|
||||
info!("verification rescinded for {}", vr_str);
|
||||
let ok: Result<()> = Ok(());
|
||||
ok
|
||||
})
|
||||
.await?
|
||||
}
|
||||
|
||||
/// Persist an event, create a verification record, and broadcast.
|
||||
// TODO: have more event-writing logic handled in the db module.
|
||||
// Right now, these events avoid the rate limit. That is
|
||||
// acceptable since as soon as the user is registered, this path
|
||||
// is no longer used.
|
||||
// TODO: refactor these into spawn_blocking
|
||||
// calls to get them off the async executors.
|
||||
async fn create_new_verified_user(&mut self, name: &str, event: &Event) -> Result<()> {
|
||||
let start = Instant::now();
|
||||
// we should only do this if we are enabled. if we are
|
||||
// disabled/passive, the event has already been persisted.
|
||||
let should_write_event = self.settings.verified_users.is_enabled();
|
||||
if should_write_event {
|
||||
match db::write_event(&mut self.write_pool.get()?, event) {
|
||||
Ok(updated) => {
|
||||
if updated != 0 {
|
||||
info!(
|
||||
"persisted event (new verified pubkey): {:?} in {:?}",
|
||||
event.get_event_id_prefix(),
|
||||
start.elapsed()
|
||||
);
|
||||
self.event_tx.send(event.clone()).ok();
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
warn!("event insert failed: {:?}", err);
|
||||
if let Error::SqlError(r) = err {
|
||||
warn!("because: : {:?}", r);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// write the verification record
|
||||
save_verification_record(self.write_pool.get()?, event, name).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Result of checking user's verification status against DNS/HTTP.
|
||||
#[derive(PartialEq, Eq, Debug, Clone)]
|
||||
pub enum UserWebVerificationStatus {
|
||||
Verified, // user is verified, as of now.
|
||||
DomainNotAllowed, // domain blacklist or whitelist denied us from attempting a verification
|
||||
Unknown, // user's status could not be determined (timeout, server error)
|
||||
Unverified, // user's status is not verified (successful check, name / addr do not match)
|
||||
}
|
||||
|
||||
/// A NIP-05 verification record.
|
||||
#[derive(PartialEq, Eq, Debug, Clone)]
|
||||
// Basic information for a verification event. Gives us all we need to assert a NIP-05 address is good.
|
||||
pub struct VerificationRecord {
|
||||
pub rowid: u64, // database row for this verification event
|
||||
pub name: Nip05Name, // address being verified
|
||||
pub address: String, // pubkey
|
||||
pub event: String, // event ID hash providing the verification
|
||||
pub event_created: u64, // when the metadata event was published
|
||||
pub last_success: Option<u64>, // the most recent time a verification was provided. None if verification under this name has never succeeded.
|
||||
pub last_failure: Option<u64>, // the most recent time verification was attempted, but could not be completed.
|
||||
pub failure_count: u64, // how many consecutive failures have been observed.
|
||||
}
|
||||
|
||||
/// Check with settings to determine if a given domain is allowed to
/// publish.
pub fn is_domain_allowed(
    domain: &str,
    whitelist: &Option<Vec<String>>,
    blacklist: &Option<Vec<String>>,
) -> bool {
    // if there is a whitelist, the domain must be present in it.
    if let Some(wl) = whitelist {
        // workaround for Vec contains not accepting &str
        return wl.iter().any(|x| x == domain);
    }
    // otherwise, check that the domain is not in the blacklist
    if let Some(bl) = blacklist {
        return !bl.iter().any(|x| x == domain);
    }
    true
}
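// Illustrative sketch, not in the original source: the whitelist, when
// present, takes precedence and must contain the domain; otherwise only the
// blacklist is consulted; with neither configured, every domain is allowed.
#[cfg(test)]
mod is_domain_allowed_example {
    use super::*;

    #[test]
    fn whitelist_takes_precedence() {
        let wl = Some(vec!["example.com".to_owned()]);
        let bl = Some(vec!["example.com".to_owned()]);
        // whitelisted, even though it also appears in the blacklist
        assert!(is_domain_allowed("example.com", &wl, &bl));
        // not whitelisted
        assert!(!is_domain_allowed("other.com", &wl, &None));
        // no whitelist: the blacklist applies
        assert!(!is_domain_allowed("example.com", &None, &bl));
        // no lists at all: allowed
        assert!(is_domain_allowed("any.com", &None, &None));
    }
}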
|
||||
|
||||
impl VerificationRecord {
|
||||
/// Check if the record is recent enough to be considered valid,
|
||||
/// and the domain is allowed.
|
||||
pub fn is_valid(&self, verified_users_settings: &VerifiedUsers) -> bool {
|
||||
//let settings = SETTINGS.read().unwrap();
|
||||
// how long a verification record is good for
|
||||
let nip05_expiration = &verified_users_settings.verify_expiration_duration;
|
||||
if let Some(e) = nip05_expiration {
|
||||
if !self.is_current(e) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
// check domains
|
||||
is_domain_allowed(
|
||||
&self.name.domain,
|
||||
&verified_users_settings.domain_whitelist,
|
||||
&verified_users_settings.domain_blacklist,
|
||||
)
|
||||
}
|
||||
|
||||
/// Check if this record has been validated since the given
|
||||
/// duration.
|
||||
fn is_current(&self, d: &Duration) -> bool {
|
||||
match self.last_success {
|
||||
Some(s) => {
|
||||
// current time - duration
|
||||
let now = SystemTime::now();
|
||||
let cutoff = now - *d;
|
||||
let cutoff_epoch = cutoff
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
.map(|x| x.as_secs())
|
||||
.unwrap_or(0);
|
||||
s > cutoff_epoch
|
||||
}
|
||||
None => false,
|
||||
}
|
||||
}
|
||||
}
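// Illustrative sketch, not in the original source: a record whose last
// success is "now" is current within a one-hour window, while a record that
// has never succeeded is never current.
#[cfg(test)]
mod verification_record_example {
    use super::*;

    #[test]
    fn currency_window() {
        let mut vr = VerificationRecord {
            rowid: 1,
            name: Nip05Name::try_from("bob@example.com").unwrap(),
            address: "abc123".to_owned(),
            event: "def456".to_owned(),
            event_created: 0,
            last_success: Some(unix_time()),
            last_failure: None,
            failure_count: 0,
        };
        assert!(vr.is_current(&Duration::from_secs(3600)));
        vr.last_success = None;
        assert!(!vr.is_current(&Duration::from_secs(3600)));
    }
}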
|
||||
|
||||
impl std::fmt::Display for VerificationRecord {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"({:?},{:?})",
|
||||
self.name.to_string(),
|
||||
self.address.chars().take(8).collect::<String>()
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new verification record based on an event
|
||||
pub async fn save_verification_record(
|
||||
mut conn: db::PooledConnection,
|
||||
event: &Event,
|
||||
name: &str,
|
||||
) -> Result<()> {
|
||||
let e = hex::decode(&event.id).ok();
|
||||
let n = name.to_owned();
|
||||
let a_prefix = event.get_author_prefix();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let tx = conn.transaction()?;
|
||||
{
|
||||
// if we create a /new/ one, we should get rid of any old ones. or group the new ones by name and only consider the latest.
|
||||
let query = "INSERT INTO user_verification (metadata_event, name, verified_at) VALUES ((SELECT id from event WHERE event_hash=?), ?, strftime('%s','now'));";
|
||||
let mut stmt = tx.prepare(query)?;
|
||||
stmt.execute(params![e, n])?;
|
||||
// get the row ID
|
||||
let v_id = tx.last_insert_rowid();
|
||||
// delete everything else by this name
|
||||
let del_query = "DELETE FROM user_verification WHERE name = ? AND id != ?;";
|
||||
let mut del_stmt = tx.prepare(del_query)?;
|
||||
let count = del_stmt.execute(params![n,v_id])?;
|
||||
if count > 0 {
|
||||
info!("removed {} old verification records for ({:?},{:?})", count, n, a_prefix);
|
||||
}
|
||||
}
|
||||
tx.commit()?;
|
||||
info!("saved new verification record for ({:?},{:?})", n, a_prefix);
|
||||
let ok: Result<()> = Ok(());
|
||||
ok
|
||||
}).await?
|
||||
}
|
||||
|
||||
/// Retrieve the most recent verification record for a given pubkey (async).
|
||||
pub async fn get_latest_user_verification(
|
||||
conn: db::PooledConnection,
|
||||
pubkey: &str,
|
||||
) -> Result<VerificationRecord> {
|
||||
let p = pubkey.to_owned();
|
||||
tokio::task::spawn_blocking(move || query_latest_user_verification(conn, p)).await?
|
||||
}
|
||||
|
||||
/// Query database for the latest verification record for a given pubkey.
|
||||
pub fn query_latest_user_verification(
|
||||
mut conn: db::PooledConnection,
|
||||
pubkey: String,
|
||||
) -> Result<VerificationRecord> {
|
||||
let tx = conn.transaction()?;
|
||||
let query = "SELECT v.id, v.name, e.event_hash, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v LEFT JOIN event e ON e.id=v.metadata_event WHERE e.author=? ORDER BY e.created_at DESC, v.verified_at DESC, v.failed_at DESC LIMIT 1;";
|
||||
let mut stmt = tx.prepare_cached(query)?;
|
||||
let fields = stmt.query_row(params![hex::decode(&pubkey).ok()], |r| {
|
||||
let rowid: u64 = r.get(0)?;
|
||||
let rowname: String = r.get(1)?;
|
||||
let eventid: Vec<u8> = r.get(2)?;
|
||||
let created_at: u64 = r.get(3)?;
|
||||
// create a tuple since we can't throw non-rusqlite errors in this closure
|
||||
Ok((
|
||||
rowid,
|
||||
rowname,
|
||||
eventid,
|
||||
created_at,
|
||||
r.get(4).ok(),
|
||||
r.get(5).ok(),
|
||||
r.get(6)?,
|
||||
))
|
||||
})?;
|
||||
Ok(VerificationRecord {
|
||||
rowid: fields.0,
|
||||
name: Nip05Name::try_from(&fields.1[..])?,
|
||||
address: pubkey,
|
||||
event: hex::encode(fields.2),
|
||||
event_created: fields.3,
|
||||
last_success: fields.4,
|
||||
last_failure: fields.5,
|
||||
failure_count: fields.6,
|
||||
})
|
||||
}
|
||||
|
||||
/// Retrieve the oldest user verification (async)
|
||||
pub async fn get_oldest_user_verification(
|
||||
conn: db::PooledConnection,
|
||||
earliest: u64,
|
||||
) -> Result<VerificationRecord> {
|
||||
tokio::task::spawn_blocking(move || query_oldest_user_verification(conn, earliest)).await?
|
||||
}
|
||||
|
||||
pub fn query_oldest_user_verification(
|
||||
mut conn: db::PooledConnection,
|
||||
earliest: u64,
|
||||
) -> Result<VerificationRecord> {
|
||||
let tx = conn.transaction()?;
|
||||
let query = "SELECT v.id, v.name, e.event_hash, e.author, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v INNER JOIN event e ON e.id=v.metadata_event WHERE (v.verified_at < ? OR v.verified_at IS NULL) AND (v.failed_at < ? OR v.failed_at IS NULL) ORDER BY v.verified_at ASC, v.failed_at ASC LIMIT 1;";
|
||||
let mut stmt = tx.prepare_cached(query)?;
|
||||
let fields = stmt.query_row(params![earliest, earliest], |r| {
|
||||
let rowid: u64 = r.get(0)?;
|
||||
let rowname: String = r.get(1)?;
|
||||
let eventid: Vec<u8> = r.get(2)?;
|
||||
let pubkey: Vec<u8> = r.get(3)?;
|
||||
let created_at: u64 = r.get(4)?;
|
||||
// create a tuple since we can't throw non-rusqlite errors in this closure
|
||||
Ok((
|
||||
rowid,
|
||||
rowname,
|
||||
eventid,
|
||||
pubkey,
|
||||
created_at,
|
||||
r.get(5).ok(),
|
||||
r.get(6).ok(),
|
||||
r.get(7)?,
|
||||
))
|
||||
})?;
|
||||
let vr = VerificationRecord {
|
||||
rowid: fields.0,
|
||||
name: Nip05Name::try_from(&fields.1[..])?,
|
||||
address: hex::encode(fields.3),
|
||||
event: hex::encode(fields.2),
|
||||
event_created: fields.4,
|
||||
last_success: fields.5,
|
||||
last_failure: fields.6,
|
||||
failure_count: fields.7,
|
||||
};
|
||||
Ok(vr)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn local_from_inet() {
|
||||
let addr = "bob@example.com";
|
||||
let parsed = Nip05Name::try_from(addr);
|
||||
assert!(parsed.is_ok());
|
||||
let v = parsed.unwrap();
|
||||
assert_eq!(v.local, "bob");
|
||||
assert_eq!(v.domain, "example.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn not_enough_sep() {
|
||||
let addr = "bob_example.com";
|
||||
let parsed = Nip05Name::try_from(addr);
|
||||
assert!(parsed.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn too_many_sep() {
|
||||
let addr = "foo@bob@example.com";
|
||||
let parsed = Nip05Name::try_from(addr);
|
||||
assert!(parsed.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_local_name() {
|
||||
// non-permitted ascii chars
|
||||
assert!(Nip05Name::try_from("foo!@example.com").is_err());
|
||||
assert!(Nip05Name::try_from("foo @example.com").is_err());
|
||||
assert!(Nip05Name::try_from(" foo@example.com").is_err());
|
||||
assert!(Nip05Name::try_from("f oo@example.com").is_err());
|
||||
assert!(Nip05Name::try_from("foo<@example.com").is_err());
|
||||
// unicode dash
|
||||
assert!(Nip05Name::try_from("foo‐bar@example.com").is_err());
|
||||
// emoji
|
||||
assert!(Nip05Name::try_from("foo😭bar@example.com").is_err());
|
||||
}
|
||||
#[test]
|
||||
fn invalid_domain_name() {
|
||||
// non-permitted ascii chars
|
||||
assert!(Nip05Name::try_from("foo@examp!e.com").is_err());
|
||||
assert!(Nip05Name::try_from("foo@ example.com").is_err());
|
||||
assert!(Nip05Name::try_from("foo@exa mple.com").is_err());
|
||||
assert!(Nip05Name::try_from("foo@example .com").is_err());
|
||||
assert!(Nip05Name::try_from("foo@exa<mple.com").is_err());
|
||||
// unicode dash
|
||||
assert!(Nip05Name::try_from("foobar@exa‐mple.com").is_err());
|
||||
// emoji
|
||||
assert!(Nip05Name::try_from("foobar@ex😭ample.com").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn to_url() {
|
||||
let nip = Nip05Name::try_from("foobar@example.com").unwrap();
|
||||
assert_eq!(
|
||||
nip.to_url(),
|
||||
Some(
|
||||
"https://example.com/.well-known/nostr.json?name=foobar"
|
||||
.parse()
|
||||
.unwrap()
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
src/notice.rs | 86 (new file)
@@ -0,0 +1,86 @@
|
||||
pub enum EventResultStatus {
|
||||
Saved,
|
||||
Duplicate,
|
||||
Invalid,
|
||||
Blocked,
|
||||
RateLimited,
|
||||
Error,
|
||||
}
|
||||
|
||||
pub struct EventResult {
|
||||
pub id: String,
|
||||
pub msg: String,
|
||||
pub status: EventResultStatus,
|
||||
}
|
||||
|
||||
pub enum Notice {
|
||||
Message(String),
|
||||
EventResult(EventResult),
|
||||
}
|
||||
|
||||
impl EventResultStatus {
|
||||
pub fn to_bool(&self) -> bool {
|
||||
match self {
|
||||
Self::Saved => true,
|
||||
Self::Duplicate => true,
|
||||
Self::Invalid => false,
|
||||
Self::Blocked => false,
|
||||
Self::RateLimited => false,
|
||||
Self::Error => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn prefix(&self) -> &'static str {
|
||||
match self {
|
||||
Self::Saved => "saved",
|
||||
Self::Duplicate => "duplicate",
|
||||
Self::Invalid => "invalid",
|
||||
Self::Blocked => "blocked",
|
||||
Self::RateLimited => "rate-limited",
|
||||
Self::Error => "error",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Notice {
|
||||
//pub fn err(err: error::Error, id: String) -> Notice {
|
||||
// Notice::err_msg(format!("{}", err), id)
|
||||
//}
|
||||
|
||||
pub fn message(msg: String) -> Notice {
|
||||
Notice::Message(msg)
|
||||
}
|
||||
|
||||
fn prefixed(id: String, msg: &str, status: EventResultStatus) -> Notice {
|
||||
let msg = format!("{}: {}", status.prefix(), msg);
|
||||
Notice::EventResult(EventResult { id, msg, status })
|
||||
}
|
||||
|
||||
pub fn invalid(id: String, msg: &str) -> Notice {
|
||||
Notice::prefixed(id, msg, EventResultStatus::Invalid)
|
||||
}
|
||||
|
||||
pub fn blocked(id: String, msg: &str) -> Notice {
|
||||
Notice::prefixed(id, msg, EventResultStatus::Blocked)
|
||||
}
|
||||
|
||||
pub fn rate_limited(id: String, msg: &str) -> Notice {
|
||||
Notice::prefixed(id, msg, EventResultStatus::RateLimited)
|
||||
}
|
||||
|
||||
pub fn duplicate(id: String) -> Notice {
|
||||
Notice::prefixed(id, "", EventResultStatus::Duplicate)
|
||||
}
|
||||
|
||||
pub fn error(id: String, msg: &str) -> Notice {
|
||||
Notice::prefixed(id, msg, EventResultStatus::Error)
|
||||
}
|
||||
|
||||
pub fn saved(id: String) -> Notice {
|
||||
Notice::EventResult(EventResult {
|
||||
id,
|
||||
msg: "".into(),
|
||||
status: EventResultStatus::Saved,
|
||||
})
|
||||
}
|
||||
}
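// Illustrative sketch, not part of the original file: constructing notices
// for two common outcomes and inspecting the prefixed message format.
#[cfg(test)]
mod notice_example {
    use super::*;

    #[test]
    fn prefixed_messages() {
        let n = Notice::invalid("event-id".to_owned(), "bad signature");
        if let Notice::EventResult(res) = n {
            assert_eq!(res.msg, "invalid: bad signature");
            assert!(!res.status.to_bool());
        } else {
            panic!("expected an EventResult notice");
        }
        let ok = Notice::saved("event-id".to_owned());
        if let Notice::EventResult(res) = ok {
            assert!(res.status.to_bool());
        }
    }
}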
|
@@ -1,119 +0,0 @@ (file removed)
|
||||
//! Nostr protocol layered over WebSocket
|
||||
use crate::close::CloseCmd;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::event::EventCmd;
|
||||
use crate::subscription::Subscription;
|
||||
use core::pin::Pin;
|
||||
use futures::sink::Sink;
|
||||
use futures::stream::Stream;
|
||||
use futures::task::Context;
|
||||
use futures::task::Poll;
|
||||
use log::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::net::TcpStream;
|
||||
use tokio_tungstenite::WebSocketStream;
|
||||
use tungstenite::error::Error as WsError;
|
||||
use tungstenite::protocol::Message;
|
||||
|
||||
/// Nostr protocol messages from a client
|
||||
#[derive(Deserialize, Serialize, Clone, PartialEq, Debug)]
|
||||
#[serde(untagged)]
|
||||
pub enum NostrMessage {
|
||||
/// An `EVENT` message
|
||||
EventMsg(EventCmd),
|
||||
/// A `REQ` message
|
||||
SubMsg(Subscription),
|
||||
/// A `CLOSE` message
|
||||
CloseMsg(CloseCmd),
|
||||
}
|
||||
|
||||
/// Nostr protocol messages from a relay/server
|
||||
#[derive(Deserialize, Serialize, Clone, PartialEq, Debug)]
|
||||
pub enum NostrResponse {
|
||||
/// A `NOTICE` response
|
||||
NoticeRes(String),
|
||||
/// An `EVENT` response, composed of the subscription identifier,
|
||||
/// and serialized event JSON
|
||||
EventRes(String, String),
|
||||
}
|
||||
|
||||
/// A Nostr protocol stream is layered on top of a Websocket stream.
|
||||
pub struct NostrStream {
|
||||
ws_stream: WebSocketStream<TcpStream>,
|
||||
}
|
||||
|
||||
/// Given a websocket, return a protocol stream wrapper.
|
||||
pub fn wrap_ws_in_nostr(ws: WebSocketStream<TcpStream>) -> NostrStream {
|
||||
NostrStream { ws_stream: ws }
|
||||
}
|
||||
|
||||
/// Implement the [`Stream`] interface to produce Nostr messages.
|
||||
impl Stream for NostrStream {
|
||||
type Item = Result<NostrMessage>;
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
/// Convert Message to NostrMessage
|
||||
fn convert(msg: String) -> Result<NostrMessage> {
|
||||
let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
|
||||
match parsed_res {
|
||||
Ok(m) => Ok(m),
|
||||
Err(e) => {
|
||||
debug!("proto parse error: {:?}", e);
|
||||
Err(Error::ProtoParseError)
|
||||
}
|
||||
}
|
||||
}
|
||||
match Pin::new(&mut self.ws_stream).poll_next(cx) {
|
||||
Poll::Pending => Poll::Pending,
|
||||
Poll::Ready(None) => Poll::Ready(None),
|
||||
Poll::Ready(Some(v)) => match v {
|
||||
Ok(Message::Text(vs)) => Poll::Ready(Some(convert(vs))),
|
||||
Ok(Message::Binary(_)) => Poll::Ready(Some(Err(Error::ProtoParseError))),
|
||||
Ok(Message::Pong(_)) | Ok(Message::Ping(_)) => Poll::Pending,
|
||||
Ok(Message::Close(_)) => Poll::Ready(None),
|
||||
Err(WsError::AlreadyClosed) | Err(WsError::ConnectionClosed) => Poll::Ready(None),
|
||||
Err(_) => Poll::Ready(Some(Err(Error::ConnError))),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Implement the [`Sink`] interface to produce Nostr responses.
|
||||
impl Sink<NostrResponse> for NostrStream {
|
||||
type Error = Error;
|
||||
|
||||
fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
// map the error type
|
||||
match Pin::new(&mut self.ws_stream).poll_ready(cx) {
|
||||
Poll::Ready(Ok(())) => Poll::Ready(Ok(())),
|
||||
Poll::Ready(Err(_)) => Poll::Ready(Err(Error::ConnWriteError)),
|
||||
Poll::Pending => Poll::Pending,
|
||||
}
|
||||
}
|
||||
|
||||
fn start_send(mut self: Pin<&mut Self>, item: NostrResponse) -> Result<(), Self::Error> {
|
||||
// TODO: do real escaping for these - at least on NOTICE,
|
||||
// which surely has some problems if arbitrary text is sent.
|
||||
let send_str = match item {
|
||||
NostrResponse::NoticeRes(msg) => {
|
||||
let s = msg.replace("\"", "");
|
||||
format!("[\"NOTICE\",\"{}\"]", s)
|
||||
}
|
||||
NostrResponse::EventRes(sub, eventstr) => {
|
||||
let subesc = sub.replace("\"", "");
|
||||
format!("[\"EVENT\",\"{}\",{}]", subesc, eventstr)
|
||||
}
|
||||
};
|
||||
match Pin::new(&mut self.ws_stream).start_send(Message::Text(send_str)) {
|
||||
Ok(()) => Ok(()),
|
||||
Err(_) => Err(Error::ConnWriteError),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
}
|
src/schema.rs | 471 (new file)
@@ -0,0 +1,471 @@
|
||||
//! Database schema and migrations
|
||||
use crate::db::PooledConnection;
|
||||
use crate::error::Result;
|
||||
use crate::event::{single_char_tagname, Event};
|
||||
use crate::utils::is_lower_hex;
|
||||
use const_format::formatcp;
|
||||
use rusqlite::limits::Limit;
|
||||
use rusqlite::params;
|
||||
use rusqlite::Connection;
|
||||
use std::cmp::Ordering;
|
||||
use std::time::Instant;
|
||||
use tracing::{debug, error, info};
|
||||
|
||||
/// Startup DB Pragmas
|
||||
pub const STARTUP_SQL: &str = r##"
|
||||
PRAGMA main.synchronous=NORMAL;
|
||||
PRAGMA foreign_keys = ON;
|
||||
PRAGMA journal_size_limit=32768;
|
||||
pragma mmap_size = 17179869184; -- cap mmap at 16GB
|
||||
"##;
|
||||
|
||||
/// Latest database version
|
||||
pub const DB_VERSION: usize = 11;
|
||||
|
||||
/// Schema definition
|
||||
const INIT_SQL: &str = formatcp!(
|
||||
r##"
|
||||
-- Database settings
|
||||
PRAGMA encoding = "UTF-8";
|
||||
PRAGMA journal_mode=WAL;
|
||||
PRAGMA main.synchronous=NORMAL;
|
||||
PRAGMA foreign_keys = ON;
|
||||
PRAGMA application_id = 1654008667;
|
||||
PRAGMA user_version = {};
|
||||
|
||||
-- Event Table
|
||||
CREATE TABLE IF NOT EXISTS event (
|
||||
id INTEGER PRIMARY KEY,
|
||||
event_hash BLOB NOT NULL, -- 32-byte SHA-256 hash of the serialized event (the event id)
|
||||
first_seen INTEGER NOT NULL, -- when the event was first seen (not authored!) (seconds since 1970)
|
||||
created_at INTEGER NOT NULL, -- when the event was authored
|
||||
author BLOB NOT NULL, -- author pubkey
|
||||
delegated_by BLOB, -- delegator pubkey (NIP-26)
|
||||
kind INTEGER NOT NULL, -- event kind
|
||||
hidden INTEGER, -- relevant for queries
|
||||
content TEXT NOT NULL -- serialized json of event object
|
||||
);
|
||||
|
||||
-- Event Indexes
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash);
|
||||
CREATE INDEX IF NOT EXISTS author_index ON event(author);
|
||||
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
|
||||
CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
|
||||
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
|
||||
|
||||
-- Tag Table
|
||||
-- Tag values are stored as either a BLOB (if they come in as a
|
||||
-- hex-string), or TEXT otherwise.
|
||||
-- This means that searches need to select the appropriate column.
|
||||
CREATE TABLE IF NOT EXISTS tag (
|
||||
id INTEGER PRIMARY KEY,
|
||||
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
|
||||
name TEXT, -- the tag name ("p", "e", whatever)
|
||||
value TEXT, -- the tag value, if not hex.
|
||||
value_hex BLOB, -- the tag value, if it can be interpreted as a lowercase hex string.
|
||||
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
|
||||
CREATE INDEX IF NOT EXISTS tag_val_hex_index ON tag(value_hex);
|
||||
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value_hex,value);
|
||||
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value_hex);
|
||||
|
||||
-- NIP-05 User Validation
|
||||
CREATE TABLE IF NOT EXISTS user_verification (
|
||||
id INTEGER PRIMARY KEY,
|
||||
metadata_event INTEGER NOT NULL, -- the metadata event used for this validation.
|
||||
name TEXT NOT NULL, -- the nip05 field value (user@domain).
|
||||
verified_at INTEGER, -- timestamp this author/nip05 was most recently verified.
|
||||
failed_at INTEGER, -- timestamp a verification attempt failed (host down).
|
||||
failure_count INTEGER DEFAULT 0, -- number of consecutive failures.
|
||||
FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name);
|
||||
CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);
|
||||
"##,
|
||||
DB_VERSION
|
||||
);
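// Illustrative sketch, not part of the original source: because tag values
// are split across `value_hex` (even-length, lowercase hex stored as a BLOB)
// and `value` (everything else, stored as TEXT), a lookup must target the
// column the writer would have chosen for that value.
#[allow(dead_code)]
fn tag_value_column(tagval: &str) -> &'static str {
    // mirrors the insert logic used in the v5 -> v6 migration below
    if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
        "value_hex"
    } else {
        "value"
    }
}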
|
||||
|
||||
/// Determine the current application database schema version.
|
||||
pub fn curr_db_version(conn: &mut Connection) -> Result<usize> {
|
||||
let query = "PRAGMA user_version;";
|
||||
let curr_version = conn.query_row(query, [], |row| row.get(0))?;
|
||||
Ok(curr_version)
|
||||
}
|
||||
|
||||
fn mig_init(conn: &mut PooledConnection) -> Result<usize> {
|
||||
match conn.execute_batch(INIT_SQL) {
|
||||
Ok(()) => {
|
||||
info!(
|
||||
"database pragma/schema initialized to v{}, and ready",
|
||||
DB_VERSION
|
||||
);
|
||||
}
|
||||
Err(err) => {
|
||||
error!("update failed: {}", err);
|
||||
panic!("database could not be initialized");
|
||||
}
|
||||
}
|
||||
Ok(DB_VERSION)
|
||||
}
|
||||
|
||||
/// Upgrade DB to latest version, and execute pragma settings
|
||||
pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
|
||||
// check the version.
|
||||
let mut curr_version = curr_db_version(conn)?;
|
||||
info!("DB version = {:?}", curr_version);
|
||||
|
||||
debug!(
|
||||
"SQLite max query parameters: {}",
|
||||
conn.limit(Limit::SQLITE_LIMIT_VARIABLE_NUMBER)
|
||||
);
|
||||
debug!(
|
||||
"SQLite max table/blob/text length: {} MB",
|
||||
(conn.limit(Limit::SQLITE_LIMIT_LENGTH) as f64 / (1024 * 1024) as f64).floor()
|
||||
);
|
||||
debug!(
|
||||
"SQLite max SQL length: {} MB",
|
||||
(conn.limit(Limit::SQLITE_LIMIT_SQL_LENGTH) as f64 / (1024 * 1024) as f64).floor()
|
||||
);
|
||||
|
||||
match curr_version.cmp(&DB_VERSION) {
|
||||
// Database is new or not current
|
||||
Ordering::Less => {
|
||||
// initialize from scratch
|
||||
if curr_version == 0 {
|
||||
curr_version = mig_init(conn)?;
|
||||
}
|
||||
// for initialized but out-of-date schemas, proceed to
|
||||
// upgrade sequentially until we are current.
|
||||
if curr_version == 1 {
|
||||
curr_version = mig_1_to_2(conn)?;
|
||||
}
|
||||
|
||||
if curr_version == 2 {
|
||||
curr_version = mig_2_to_3(conn)?;
|
||||
}
|
||||
|
||||
if curr_version == 3 {
|
||||
curr_version = mig_3_to_4(conn)?;
|
||||
}
|
||||
|
||||
if curr_version == 4 {
|
||||
curr_version = mig_4_to_5(conn)?;
|
||||
}
|
||||
|
||||
if curr_version == 5 {
|
||||
curr_version = mig_5_to_6(conn)?;
|
||||
}
|
||||
if curr_version == 6 {
|
||||
curr_version = mig_6_to_7(conn)?;
|
||||
}
|
||||
if curr_version == 7 {
|
||||
curr_version = mig_7_to_8(conn)?;
|
||||
}
|
||||
if curr_version == 8 {
|
||||
curr_version = mig_8_to_9(conn)?;
|
||||
}
|
||||
if curr_version == 9 {
|
||||
curr_version = mig_9_to_10(conn)?;
|
||||
}
|
||||
if curr_version == 10 {
|
||||
curr_version = mig_10_to_11(conn)?;
|
||||
}
|
||||
|
||||
if curr_version == DB_VERSION {
|
||||
info!(
|
||||
"All migration scripts completed successfully. Welcome to v{}.",
|
||||
DB_VERSION
|
||||
);
|
||||
}
|
||||
}
|
||||
// Database is current, all is good
|
||||
Ordering::Equal => {
|
||||
debug!("Database version was already current (v{})", DB_VERSION);
|
||||
}
|
||||
// Database is newer than what this code understands, abort
|
||||
Ordering::Greater => {
|
||||
panic!(
|
||||
"Database version is newer than supported by this executable (v{} > v{})",
|
||||
curr_version, DB_VERSION
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Setup PRAGMA
|
||||
conn.execute_batch(STARTUP_SQL)?;
|
||||
debug!("SQLite PRAGMA startup completed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
//// Migration Scripts
|
||||
|
||||
fn mig_1_to_2(conn: &mut PooledConnection) -> Result<usize> {
|
||||
// only change is adding a hidden column to events.
|
||||
let upgrade_sql = r##"
|
||||
ALTER TABLE event ADD hidden INTEGER;
|
||||
UPDATE event SET hidden=FALSE;
|
||||
PRAGMA user_version = 2;
|
||||
"##;
|
||||
match conn.execute_batch(upgrade_sql) {
|
||||
Ok(()) => {
|
||||
info!("database schema upgraded v1 -> v2");
|
||||
}
|
||||
Err(err) => {
|
||||
error!("update failed: {}", err);
|
||||
panic!("database could not be upgraded");
|
||||
}
|
||||
}
|
||||
Ok(2)
|
||||
}
|
||||
|
||||
fn mig_2_to_3(conn: &mut PooledConnection) -> Result<usize> {
|
||||
// this version lacks the tag column
|
||||
info!("database schema needs update from 2->3");
|
||||
let upgrade_sql = r##"
|
||||
CREATE TABLE IF NOT EXISTS tag (
|
||||
id INTEGER PRIMARY KEY,
|
||||
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
|
||||
name TEXT, -- the tag name ("p", "e", whatever)
|
||||
value TEXT, -- the tag value, if not hex.
|
||||
value_hex BLOB, -- the tag value, if it can be interpreted as a hex string.
|
||||
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
|
||||
);
|
||||
PRAGMA user_version = 3;
|
||||
"##;
|
||||
// TODO: load existing refs into tag table
|
||||
match conn.execute_batch(upgrade_sql) {
|
||||
Ok(()) => {
|
||||
info!("database schema upgraded v2 -> v3");
|
||||
}
|
||||
Err(err) => {
|
||||
error!("update failed: {}", err);
|
||||
panic!("database could not be upgraded");
|
||||
}
|
||||
}
|
||||
// iterate over every event/pubkey tag
|
||||
let tx = conn.transaction()?;
|
||||
{
|
||||
let mut stmt = tx.prepare("select event_id, \"e\", lower(hex(referenced_event)) from event_ref union select event_id, \"p\", lower(hex(referenced_pubkey)) from pubkey_ref;")?;
|
||||
let mut tag_rows = stmt.query([])?;
|
||||
while let Some(row) = tag_rows.next()? {
|
||||
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
|
||||
let event_id: u64 = row.get(0)?;
|
||||
let tag_name: String = row.get(1)?;
|
||||
let tag_value: String = row.get(2)?;
|
||||
// this will leave behind p/e tags that were non-hex, but they are invalid anyways.
|
||||
if is_lower_hex(&tag_value) {
|
||||
tx.execute(
|
||||
"INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
|
||||
params![event_id, tag_name, hex::decode(&tag_value).ok()],
|
||||
)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
info!("Updated tag values");
|
||||
tx.commit()?;
|
||||
Ok(3)
|
||||
}
|
||||
|
||||
fn mig_3_to_4(conn: &mut PooledConnection) -> Result<usize> {
|
||||
info!("database schema needs update from 3->4");
|
||||
let upgrade_sql = r##"
|
||||
-- incoming metadata events with nip05
|
||||
CREATE TABLE IF NOT EXISTS user_verification (
|
||||
id INTEGER PRIMARY KEY,
|
||||
metadata_event INTEGER NOT NULL, -- the metadata event used for this validation.
|
||||
name TEXT NOT NULL, -- the nip05 field value (user@domain).
|
||||
verified_at INTEGER, -- timestamp this author/nip05 was most recently verified.
|
||||
failed_at INTEGER, -- timestamp a verification attempt failed (host down).
|
||||
failure_count INTEGER DEFAULT 0, -- number of consecutive failures.
|
||||
FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name);
|
||||
CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);
|
||||
PRAGMA user_version = 4;
|
||||
"##;
|
||||
match conn.execute_batch(upgrade_sql) {
|
||||
Ok(()) => {
|
||||
info!("database schema upgraded v3 -> v4");
|
||||
}
|
||||
Err(err) => {
|
||||
error!("update failed: {}", err);
|
||||
panic!("database could not be upgraded");
|
||||
}
|
||||
}
|
||||
Ok(4)
|
||||
}
|
||||
|
||||
fn mig_4_to_5(conn: &mut PooledConnection) -> Result<usize> {
|
||||
info!("database schema needs update from 4->5");
|
||||
let upgrade_sql = r##"
|
||||
DROP TABLE IF EXISTS event_ref;
|
||||
DROP TABLE IF EXISTS pubkey_ref;
|
||||
PRAGMA user_version=5;
|
||||
"##;
|
||||
match conn.execute_batch(upgrade_sql) {
|
||||
Ok(()) => {
|
||||
info!("database schema upgraded v4 -> v5");
|
||||
}
|
||||
Err(err) => {
|
||||
error!("update failed: {}", err);
|
||||
panic!("database could not be upgraded");
|
||||
}
|
||||
}
|
||||
Ok(5)
|
||||
}
|
||||
|
||||
fn mig_5_to_6(conn: &mut PooledConnection) -> Result<usize> {
|
||||
info!("database schema needs update from 5->6");
|
||||
// We need to rebuild the tags table. iterate through the
|
||||
// event table. build event from json, insert tags into a
|
||||
// fresh tag table. This was needed due to a logic error in
|
||||
// how hex-like tags got indexed.
|
||||
let start = Instant::now();
|
||||
let tx = conn.transaction()?;
|
||||
{
|
||||
// Clear out table
|
||||
tx.execute("DELETE FROM tag;", [])?;
|
||||
let mut stmt = tx.prepare("select id, content from event order by id;")?;
|
||||
let mut tag_rows = stmt.query([])?;
|
||||
while let Some(row) = tag_rows.next()? {
|
||||
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
|
||||
let event_id: u64 = row.get(0)?;
|
||||
let event_json: String = row.get(1)?;
|
||||
let event: Event = serde_json::from_str(&event_json)?;
|
||||
// look at each event, and each tag, creating new tag entries if appropriate.
|
||||
for t in event.tags.iter().filter(|x| x.len() > 1) {
|
||||
let tagname = t.get(0).unwrap();
|
||||
let tagnamechar_opt = single_char_tagname(tagname);
|
||||
if tagnamechar_opt.is_none() {
|
||||
continue;
|
||||
}
|
||||
// safe because len was > 1
|
||||
let tagval = t.get(1).unwrap();
|
||||
// insert as BLOB if we can restore it losslessly.
|
||||
// this means it needs to be even length and lowercase.
|
||||
if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
|
||||
tx.execute(
|
||||
"INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
|
||||
params![event_id, tagname, hex::decode(tagval).ok()],
|
||||
)?;
|
||||
} else {
|
||||
// otherwise, insert as text
|
||||
tx.execute(
|
||||
"INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
|
||||
params![event_id, tagname, &tagval],
|
||||
)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
tx.execute("PRAGMA user_version = 6;", [])?;
|
||||
}
|
||||
tx.commit()?;
|
||||
info!("database schema upgraded v5 -> v6 in {:?}", start.elapsed());
|
||||
// vacuum after large table modification
|
||||
let start = Instant::now();
|
||||
conn.execute("VACUUM;", [])?;
|
||||
info!("vacuumed DB after tags rebuild in {:?}", start.elapsed());
|
||||
Ok(6)
|
||||
}
|
||||
|
||||
fn mig_6_to_7(conn: &mut PooledConnection) -> Result<usize> {
|
||||
info!("database schema needs update from 6->7");
|
||||
// only change is adding a delegated_by column (NIP-26) to events.
|
||||
let upgrade_sql = r##"
|
||||
ALTER TABLE event ADD delegated_by BLOB;
|
||||
CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
|
||||
PRAGMA user_version = 7;
|
||||
"##;
|
||||
match conn.execute_batch(upgrade_sql) {
|
||||
Ok(()) => {
|
||||
info!("database schema upgraded v6 -> v7");
|
||||
}
|
||||
Err(err) => {
|
||||
error!("update failed: {}", err);
|
||||
panic!("database could not be upgraded");
|
||||
}
|
||||
}
|
||||
Ok(7)
|
||||
}
|
||||
|
||||
fn mig_7_to_8(conn: &mut PooledConnection) -> Result<usize> {
|
||||
info!("database schema needs update from 7->8");
|
||||
// Remove redundant indexes, and add a better multi-column index.
|
||||
let upgrade_sql = r##"
|
||||
DROP INDEX IF EXISTS created_at_index;
|
||||
DROP INDEX IF EXISTS kind_index;
|
||||
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
|
||||
PRAGMA user_version = 8;
|
||||
"##;
|
||||
match conn.execute_batch(upgrade_sql) {
|
||||
Ok(()) => {
|
||||
info!("database schema upgraded v7 -> v8");
|
||||
}
|
||||
Err(err) => {
|
||||
error!("update failed: {}", err);
|
||||
panic!("database could not be upgraded");
|
||||
}
|
||||
}
|
||||
Ok(8)
|
||||
}
|
||||
|
||||
fn mig_8_to_9(conn: &mut PooledConnection) -> Result<usize> {
|
||||
info!("database schema needs update from 8->9");
|
||||
// Those old indexes were actually helpful...
|
||||
let upgrade_sql = r##"
|
||||
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
|
||||
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
|
||||
PRAGMA user_version = 9;
|
||||
"##;
|
||||
match conn.execute_batch(upgrade_sql) {
|
||||
Ok(()) => {
|
||||
info!("database schema upgraded v8 -> v9");
|
||||
}
|
||||
Err(err) => {
|
||||
error!("update failed: {}", err);
|
||||
panic!("database could not be upgraded");
|
||||
}
|
||||
}
|
||||
Ok(9)
|
||||
}
|
||||
|
||||
fn mig_9_to_10(conn: &mut PooledConnection) -> Result<usize> {
|
||||
info!("database schema needs update from 9->10");
|
||||
// Add a composite index on the tag table to speed up tag lookups.
|
||||
let upgrade_sql = r##"
|
||||
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value_hex,value);
|
||||
PRAGMA user_version = 10;
|
||||
"##;
|
||||
match conn.execute_batch(upgrade_sql) {
|
||||
Ok(()) => {
|
||||
info!("database schema upgraded v9 -> v10");
|
||||
}
|
||||
Err(err) => {
|
||||
error!("update failed: {}", err);
|
||||
panic!("database could not be upgraded");
|
||||
}
|
||||
}
|
||||
Ok(10)
|
||||
}
|
||||
|
||||
fn mig_10_to_11(conn: &mut PooledConnection) -> Result<usize> {
|
||||
info!("database schema needs update from 10->11");
|
||||
// Add a tag name/event index, then reindex and re-optimize the database.
|
||||
let upgrade_sql = r##"
|
||||
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value_hex);
|
||||
reindex;
|
||||
pragma optimize;
|
||||
PRAGMA user_version = 11;
|
||||
"##;
|
||||
match conn.execute_batch(upgrade_sql) {
|
||||
Ok(()) => {
|
||||
info!("database schema upgraded v10 -> v11");
|
||||
}
|
||||
Err(err) => {
|
||||
error!("update failed: {}", err);
|
||||
panic!("database could not be upgraded");
|
||||
}
|
||||
}
|
||||
Ok(11)
|
||||
}
|
src/server.rs | 738 (new file)
@@ -0,0 +1,738 @@
|
||||
//! Server process
|
||||
use crate::close::Close;
|
||||
use crate::close::CloseCmd;
|
||||
use crate::config::{Settings, VerifiedUsersMode};
|
||||
use crate::conn;
|
||||
use crate::db;
|
||||
use crate::db::SubmittedEvent;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::event::Event;
|
||||
use crate::event::EventCmd;
|
||||
use crate::info::RelayInfo;
|
||||
use crate::nip05;
|
||||
use crate::notice::Notice;
|
||||
use crate::subscription::Subscription;
|
||||
use futures::SinkExt;
|
||||
use futures::StreamExt;
|
||||
use governor::{Jitter, Quota, RateLimiter};
|
||||
use http::header::HeaderMap;
|
||||
use hyper::header::ACCEPT;
|
||||
use hyper::service::{make_service_fn, service_fn};
|
||||
use hyper::upgrade::Upgraded;
|
||||
use hyper::{
|
||||
header, server::conn::AddrStream, upgrade, Body, Request, Response, Server, StatusCode,
|
||||
};
|
||||
use rusqlite::OpenFlags;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use std::collections::HashMap;
|
||||
use std::convert::Infallible;
|
||||
use std::net::SocketAddr;
|
||||
use std::path::Path;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::mpsc::Receiver as MpscReceiver;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
use tokio::runtime::Builder;
|
||||
use tokio::sync::broadcast::{self, Receiver, Sender};
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::sync::oneshot;
|
||||
use tokio_tungstenite::WebSocketStream;
|
||||
use tracing::*;
|
||||
use tungstenite::error::CapacityError::MessageTooLong;
|
||||
use tungstenite::error::Error as WsError;
|
||||
use tungstenite::handshake;
|
||||
use tungstenite::protocol::Message;
|
||||
use tungstenite::protocol::WebSocketConfig;
|
||||
|
||||
/// Handle arbitrary HTTP requests, including for WebSocket upgrades.
|
||||
async fn handle_web_request(
|
||||
mut request: Request<Body>,
|
||||
pool: db::SqlitePool,
|
||||
settings: Settings,
|
||||
remote_addr: SocketAddr,
|
||||
broadcast: Sender<Event>,
|
||||
event_tx: tokio::sync::mpsc::Sender<SubmittedEvent>,
|
||||
shutdown: Receiver<()>,
|
||||
) -> Result<Response<Body>, Infallible> {
|
||||
match (
|
||||
request.uri().path(),
|
||||
request.headers().contains_key(header::UPGRADE),
|
||||
) {
|
||||
// Request for / as websocket
|
||||
("/", true) => {
|
||||
trace!("websocket with upgrade request");
|
||||
//assume request is a handshake, so create the handshake response
|
||||
let response = match handshake::server::create_response_with_body(&request, || {
|
||||
Body::empty()
|
||||
}) {
|
||||
Ok(response) => {
|
||||
//in case the handshake response creation succeeds,
|
||||
//spawn a task to handle the websocket connection
|
||||
tokio::spawn(async move {
|
||||
//using the hyper feature of upgrading a connection
|
||||
match upgrade::on(&mut request).await {
|
||||
//if successfully upgraded
|
||||
Ok(upgraded) => {
|
||||
// set WebSocket configuration options
|
||||
let config = WebSocketConfig {
|
||||
max_message_size: settings.limits.max_ws_message_bytes,
|
||||
max_frame_size: settings.limits.max_ws_frame_bytes,
|
||||
..Default::default()
|
||||
};
|
||||
//create a websocket stream from the upgraded object
|
||||
let ws_stream = WebSocketStream::from_raw_socket(
|
||||
//pass the upgraded object
|
||||
//as the base layer stream of the Websocket
|
||||
upgraded,
|
||||
tokio_tungstenite::tungstenite::protocol::Role::Server,
|
||||
Some(config),
|
||||
)
|
||||
.await;
|
||||
let origin = get_header_string("origin", request.headers());
|
||||
let user_agent = get_header_string("user-agent", request.headers());
|
||||
// determine the remote IP from headers if they exist
|
||||
let header_ip = settings
|
||||
.network
|
||||
.remote_ip_header
|
||||
.as_ref()
|
||||
.and_then(|x| get_header_string(x, request.headers()));
|
||||
// use the socket addr as a backup
|
||||
let remote_ip =
|
||||
header_ip.unwrap_or_else(|| remote_addr.ip().to_string());
|
||||
let client_info = ClientInfo {
|
||||
remote_ip,
|
||||
user_agent,
|
||||
origin,
|
||||
};
|
||||
// spawn a nostr server with our websocket
|
||||
tokio::spawn(nostr_server(
|
||||
pool,
|
||||
client_info,
|
||||
settings,
|
||||
ws_stream,
|
||||
broadcast,
|
||||
event_tx,
|
||||
shutdown,
|
||||
));
|
||||
}
|
||||
// todo: trace, don't print...
|
||||
Err(e) => println!(
|
||||
"error when trying to upgrade connection \
|
||||
from address {} to websocket connection. \
|
||||
Error is: {}",
|
||||
remote_addr, e
|
||||
),
|
||||
}
|
||||
});
|
||||
//return the response to the handshake request
|
||||
response
|
||||
}
|
||||
Err(error) => {
|
||||
warn!("websocket response failed");
|
||||
let mut res =
|
||||
Response::new(Body::from(format!("Failed to create websocket: {}", error)));
|
||||
*res.status_mut() = StatusCode::BAD_REQUEST;
|
||||
return Ok(res);
|
||||
}
|
||||
};
|
||||
Ok::<_, Infallible>(response)
|
||||
}
|
||||
// Request for Relay info
|
||||
("/", false) => {
|
||||
// handle request at root with no upgrade header
|
||||
// Check if this is a nostr server info request
|
||||
let accept_header = &request.headers().get(ACCEPT);
|
||||
// check if application/nostr+json is included
|
||||
if let Some(media_types) = accept_header {
|
||||
if let Ok(mt_str) = media_types.to_str() {
|
||||
if mt_str.contains("application/nostr+json") {
|
||||
// build a relay info response
|
||||
debug!("Responding to server info request");
|
||||
let rinfo = RelayInfo::from(settings.info);
|
||||
let b = Body::from(serde_json::to_string_pretty(&rinfo).unwrap());
|
||||
return Ok(Response::builder()
|
||||
.status(200)
|
||||
.header("Content-Type", "application/nostr+json")
|
||||
.header("Access-Control-Allow-Origin", "*")
|
||||
.body(b)
|
||||
.unwrap());
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(Response::builder()
|
||||
.status(200)
|
||||
.header("Content-Type", "text/plain")
|
||||
.body(Body::from("Please use a Nostr client to connect."))
|
||||
.unwrap())
|
||||
}
|
||||
(_, _) => {
|
||||
//handle any other url
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::NOT_FOUND)
|
||||
.body(Body::from("Nothing here."))
|
||||
.unwrap())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn get_header_string(header: &str, headers: &HeaderMap) -> Option<String> {
|
||||
headers
|
||||
.get(header)
|
||||
.and_then(|x| x.to_str().ok().map(|x| x.to_string()))
|
||||
}
|
||||
|
||||
// return on a control-c or internally requested shutdown signal
|
||||
async fn ctrl_c_or_signal(mut shutdown_signal: Receiver<()>) {
|
||||
let mut term_signal = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
|
||||
.expect("could not define signal");
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = shutdown_signal.recv() => {
|
||||
info!("Shutting down webserver as requested");
|
||||
// server shutting down, exit loop
|
||||
break;
|
||||
},
|
||||
_ = tokio::signal::ctrl_c() => {
|
||||
info!("Shutting down webserver due to SIGINT");
|
||||
break;
|
||||
},
|
||||
_ = term_signal.recv() => {
|
||||
info!("Shutting down webserver due to SIGTERM");
|
||||
break;
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Start running a Nostr relay server.
|
||||
pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result<(), Error> {
|
||||
trace!("Config: {:?}", settings);
|
||||
// do some config validation.
|
||||
if !Path::new(&settings.database.data_directory).is_dir() {
|
||||
error!("Database directory does not exist");
|
||||
return Err(Error::DatabaseDirError);
|
||||
}
|
||||
let addr = format!(
|
||||
"{}:{}",
|
||||
settings.network.address.trim(),
|
||||
settings.network.port
|
||||
);
|
||||
let socket_addr = addr.parse().expect("listening address not valid");
|
||||
// address whitelisting settings
|
||||
if let Some(addr_whitelist) = &settings.authorization.pubkey_whitelist {
|
||||
info!(
|
||||
"Event publishing restricted to {} pubkey(s)",
|
||||
addr_whitelist.len()
|
||||
);
|
||||
}
|
||||
// check if NIP-05 enforced user verification is on
|
||||
if settings.verified_users.is_active() {
|
||||
info!(
|
||||
"NIP-05 user verification mode:{:?}",
|
||||
settings.verified_users.mode
|
||||
);
|
||||
if let Some(d) = settings.verified_users.verify_update_duration() {
|
||||
info!("NIP-05 check user verification every: {:?}", d);
|
||||
}
|
||||
if let Some(d) = settings.verified_users.verify_expiration_duration() {
|
||||
info!("NIP-05 user verification expires after: {:?}", d);
|
||||
}
|
||||
if let Some(wl) = &settings.verified_users.domain_whitelist {
|
||||
info!("NIP-05 domain whitelist: {:?}", wl);
|
||||
}
|
||||
if let Some(bl) = &settings.verified_users.domain_blacklist {
|
||||
info!("NIP-05 domain blacklist: {:?}", bl);
|
||||
}
|
||||
}
|
||||
// configure tokio runtime
|
||||
let rt = Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.thread_name_fn(|| {
|
||||
// give each thread a unique numeric name
|
||||
static ATOMIC_ID: std::sync::atomic::AtomicUsize = std::sync::atomic::AtomicUsize::new(0);
|
||||
let id = ATOMIC_ID.fetch_add(1,Ordering::SeqCst);
|
||||
format!("tokio-ws-{}", id)
|
||||
})
|
||||
// limit concurrent SQLite blocking threads
|
||||
.max_blocking_threads(settings.limits.max_blocking_threads)
|
||||
.on_thread_start(|| {
|
||||
trace!("started new thread: {:?}", std::thread::current().name());
|
||||
})
|
||||
.on_thread_stop(|| {
|
||||
trace!("stopped thread: {:?}", std::thread::current().name());
|
||||
})
|
||||
.build()
|
||||
.unwrap();
|
||||
// start tokio
|
||||
rt.block_on(async {
|
||||
let broadcast_buffer_limit = settings.limits.broadcast_buffer;
|
||||
let persist_buffer_limit = settings.limits.event_persist_buffer;
|
||||
let verified_users_active = settings.verified_users.is_active();
|
||||
let db_min_conn = settings.database.min_conn;
|
||||
let db_max_conn = settings.database.max_conn;
|
||||
let settings = settings.clone();
|
||||
info!("listening on: {}", socket_addr);
|
||||
// all client-submitted valid events are broadcast to every
|
||||
// other client on this channel. This should be large enough
|
||||
// to accommodate slower readers (messages are dropped if
|
||||
// clients can not keep up).
|
||||
let (bcast_tx, _) = broadcast::channel::<Event>(broadcast_buffer_limit);
|
||||
// validated events that need to be persisted are sent to the
|
||||
// database via this channel.
|
||||
let (event_tx, event_rx) = mpsc::channel::<SubmittedEvent>(persist_buffer_limit);
|
||||
// establish a channel for letting all threads know about a
|
||||
// requested server shutdown.
|
||||
let (invoke_shutdown, shutdown_listen) = broadcast::channel::<()>(1);
|
||||
// create a channel for sending any new metadata event. These
|
||||
// will get processed relatively slowly (a potentially
|
||||
// multi-second blocking HTTP call) on a single thread, so we
|
||||
// buffer requests on the channel. No harm in dropping events
|
||||
// here, since we are protecting against DoS. This can make
|
||||
// it difficult to set up initial metadata in bulk, since
|
||||
// overwhelming this will drop events and won't register
|
||||
// metadata events.
|
||||
let (metadata_tx, metadata_rx) = broadcast::channel::<Event>(4096);
|
||||
// start the database writer thread. Give it a channel for
|
||||
// writing events, and for publishing events that have been
|
||||
// written (to all connected clients).
|
||||
db::db_writer(
|
||||
settings.clone(),
|
||||
event_rx,
|
||||
bcast_tx.clone(),
|
||||
metadata_tx.clone(),
|
||||
shutdown_listen,
|
||||
)
|
||||
.await;
|
||||
info!("db writer created");
|
||||
|
||||
// create a nip-05 verifier thread, if enabled.
|
||||
if settings.verified_users.mode != VerifiedUsersMode::Disabled {
|
||||
let verifier_opt =
|
||||
nip05::Verifier::new(metadata_rx, bcast_tx.clone(), settings.clone());
|
||||
if let Ok(mut v) = verifier_opt {
|
||||
if verified_users_active {
|
||||
tokio::task::spawn(async move {
|
||||
info!("starting up NIP-05 verifier...");
|
||||
v.run().await;
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
// build a connection pool for DB maintenance
|
||||
let maintenance_pool = db::build_pool(
|
||||
"maintenance writer",
|
||||
&settings,
|
||||
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
|
||||
1,
|
||||
2,
|
||||
false,
|
||||
);
|
||||
db::db_optimize(maintenance_pool.clone()).await;
|
||||
db::db_checkpoint(maintenance_pool).await;
|
||||
|
||||
// listen for (external to tokio) shutdown request
|
||||
let controlled_shutdown = invoke_shutdown.clone();
|
||||
tokio::spawn(async move {
|
||||
info!("control message listener started");
|
||||
match shutdown_rx.recv() {
|
||||
Ok(()) => {
|
||||
info!("control message requesting shutdown");
|
||||
controlled_shutdown.send(()).ok();
|
||||
}
|
||||
Err(std::sync::mpsc::RecvError) => {
|
||||
// FIXME: spurious error on startup?
|
||||
debug!("shutdown requestor is disconnected");
|
||||
}
|
||||
};
|
||||
});
|
||||
// listen for ctrl-c interrupts
|
||||
let ctrl_c_shutdown = invoke_shutdown.clone();
|
||||
// listener for webserver shutdown
|
||||
let webserver_shutdown_listen = invoke_shutdown.subscribe();
|
||||
|
||||
tokio::spawn(async move {
|
||||
tokio::signal::ctrl_c().await.unwrap();
|
||||
info!("shutting down due to SIGINT (main)");
|
||||
ctrl_c_shutdown.send(()).ok();
|
||||
});
|
||||
// build a connection pool for sqlite connections
|
||||
let pool = db::build_pool(
|
||||
"client query",
|
||||
&settings,
|
||||
rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY,
|
||||
db_min_conn,
|
||||
db_max_conn,
|
||||
true,
|
||||
);
|
||||
// spawn a task to check the pool size.
|
||||
let pool_monitor = pool.clone();
|
||||
tokio::spawn(async move {db::monitor_pool("reader", pool_monitor).await;});
|
||||
|
||||
// A `Service` is needed for every connection, so this
|
||||
// creates one from our `handle_web_request` function.
|
||||
let make_svc = make_service_fn(|conn: &AddrStream| {
|
||||
let svc_pool = pool.clone();
|
||||
let remote_addr = conn.remote_addr();
|
||||
let bcast = bcast_tx.clone();
|
||||
let event = event_tx.clone();
|
||||
let stop = invoke_shutdown.clone();
|
||||
let settings = settings.clone();
|
||||
async move {
|
||||
// service_fn converts our function into a `Service`
|
||||
Ok::<_, Infallible>(service_fn(move |request: Request<Body>| {
|
||||
handle_web_request(
|
||||
request,
|
||||
svc_pool.clone(),
|
||||
settings.clone(),
|
||||
remote_addr,
|
||||
bcast.clone(),
|
||||
event.clone(),
|
||||
stop.subscribe(),
|
||||
)
|
||||
}))
|
||||
}
|
||||
});
|
||||
let server = Server::bind(&socket_addr)
|
||||
.serve(make_svc)
|
||||
.with_graceful_shutdown(ctrl_c_or_signal(webserver_shutdown_listen));
|
||||
// run hyper in this thread; this call blocks until the server shuts down.
|
||||
if let Err(e) = server.await {
|
||||
eprintln!("server error: {}", e);
|
||||
}
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Nostr protocol messages from a client
|
||||
#[derive(Deserialize, Serialize, Clone, PartialEq, Eq, Debug)]
|
||||
#[serde(untagged)]
|
||||
pub enum NostrMessage {
|
||||
/// An `EVENT` message
|
||||
EventMsg(EventCmd),
|
||||
/// A `REQ` message
|
||||
SubMsg(Subscription),
|
||||
/// A `CLOSE` message
|
||||
CloseMsg(CloseCmd),
|
||||
}
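// On the wire these are the NIP-01 client messages, e.g. ["EVENT", {..event..}],
// ["REQ", "<sub id>", {..filter..}, ...] and ["CLOSE", "<sub id>"]; the untagged
// serde representation picks whichever variant parses.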
|
||||
|
||||
/// Convert Message to NostrMessage
|
||||
fn convert_to_msg(msg: String, max_bytes: Option<usize>) -> Result<NostrMessage> {
|
||||
let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
|
||||
match parsed_res {
|
||||
Ok(m) => {
|
||||
if let NostrMessage::EventMsg(_) = m {
|
||||
if let Some(max_size) = max_bytes {
|
||||
// check length, ensure that some max size is set.
|
||||
if msg.len() > max_size && max_size > 0 {
|
||||
return Err(Error::EventMaxLengthError(msg.len()));
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(m)
|
||||
}
|
||||
Err(e) => {
|
||||
debug!("proto parse error: {:?}", e);
|
||||
debug!("parse error on message: {}", msg.trim());
|
||||
Err(Error::ProtoParseError)
|
||||
}
|
||||
}
|
||||
}
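// Note: the size check above applies only to EVENT messages, and a configured
// max of zero (or no configured max at all) disables it.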
|
||||
|
||||
/// Turn a string into a NOTICE message ready to send over a WebSocket
|
||||
fn make_notice_message(notice: Notice) -> Message {
|
||||
let json = match notice {
|
||||
Notice::Message(ref msg) => json!(["NOTICE", msg]),
|
||||
Notice::EventResult(ref res) => json!(["OK", res.id, res.status.to_bool(), res.msg]),
|
||||
};
|
||||
|
||||
Message::text(json.to_string())
|
||||
}
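// e.g. a Notice::Message serializes to ["NOTICE","<msg>"], and an event result
// to ["OK","<event id>",true|false,"<msg>"] (the NIP-20 command result form).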
|
||||
|
||||
struct ClientInfo {
|
||||
remote_ip: String,
|
||||
user_agent: Option<String>,
|
||||
origin: Option<String>,
|
||||
}
|
||||
|
||||
/// Handle new client connections. This runs through an event loop
|
||||
/// for all client communication.
|
||||
async fn nostr_server(
|
||||
pool: db::SqlitePool,
|
||||
client_info: ClientInfo,
|
||||
settings: Settings,
|
||||
mut ws_stream: WebSocketStream<Upgraded>,
|
||||
broadcast: Sender<Event>,
|
||||
event_tx: mpsc::Sender<SubmittedEvent>,
|
||||
mut shutdown: Receiver<()>,
|
||||
) {
|
||||
// the time this websocket nostr server started
|
||||
let orig_start = Instant::now();
|
||||
// get a broadcast channel for clients to communicate on
|
||||
let mut bcast_rx = broadcast.subscribe();
|
||||
// Track internal client state
|
||||
let mut conn = conn::ClientConn::new(client_info.remote_ip);
|
||||
// subscription creation rate limiting
|
||||
let mut sub_lim_opt = None;
|
||||
// add up to 100ms of jitter when the rate limiter delays a request
|
||||
let jitter = Jitter::up_to(Duration::from_millis(100));
|
||||
let sub_per_min_setting = settings.limits.subscriptions_per_min;
|
||||
if let Some(sub_per_min) = sub_per_min_setting {
|
||||
if sub_per_min > 0 {
|
||||
trace!("Rate limits for sub creation ({}/min)", sub_per_min);
|
||||
let quota_time = core::num::NonZeroU32::new(sub_per_min).unwrap();
|
||||
let quota = Quota::per_minute(quota_time);
|
||||
sub_lim_opt = Some(RateLimiter::direct(quota));
|
||||
}
|
||||
}
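// e.g. with subscriptions_per_min = 10 (hypothetical value), an 11th REQ within a
// minute is delayed by the limiter (plus up to 100ms of jitter) before it is registered.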
|
||||
// Use the remote IP as the client identifier
|
||||
let cid = conn.get_client_prefix();
|
||||
// Create a channel for receiving query results from the database.
|
||||
// we will send the tx handle to every query we generate.
|
||||
// this has capacity for some of the larger requests we see, which
|
||||
// should allow the DB thread to release the handle earlier.
|
||||
let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(20000);
|
||||
// Create channel for receiving NOTICEs
|
||||
let (notice_tx, mut notice_rx) = mpsc::channel::<Notice>(128);
|
||||
|
||||
// last time this client sent data (message, ping, etc.)
|
||||
let mut last_message_time = Instant::now();
|
||||
|
||||
// ping interval (every 5 minutes)
|
||||
let default_ping_dur = Duration::from_secs(settings.network.ping_interval_seconds.into());
|
||||
|
||||
// disconnect after 20 minutes without a ping response or event.
|
||||
let max_quiet_time = Duration::from_secs(60 * 20);
|
||||
|
||||
let start = tokio::time::Instant::now() + default_ping_dur;
|
||||
let mut ping_interval = tokio::time::interval_at(start, default_ping_dur);
|
||||
|
||||
// maintain a hashmap of oneshot channels for active subscriptions.
|
||||
// when a subscription is cancelled, make a message
|
||||
// available to the executing query so it knows to stop.
|
||||
let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
|
||||
// for stats, keep track of how many events the client published,
|
||||
// and how many it received from queries.
|
||||
let mut client_published_event_count: usize = 0;
|
||||
let mut client_received_event_count: usize = 0;
|
||||
debug!("new client connection (cid: {}, ip: {:?})", cid, conn.ip());
|
||||
let origin = client_info.origin.unwrap_or_else(|| "<unspecified>".into());
|
||||
let user_agent = client_info
|
||||
.user_agent
|
||||
.unwrap_or_else(|| "<unspecified>".into());
|
||||
debug!(
|
||||
"cid: {}, origin: {:?}, user-agent: {:?}",
|
||||
cid, origin, user_agent
|
||||
);
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = shutdown.recv() => {
|
||||
info!("Closing connection due to shutdown, client: {}, ip: {:?}, connected: {:?}", cid, conn.ip(), orig_start.elapsed());
|
||||
// server shutting down, exit loop
|
||||
break;
|
||||
},
|
||||
_ = ping_interval.tick() => {
|
||||
// check how long since we talked to client
|
||||
// if it has been too long, disconnect
|
||||
if last_message_time.elapsed() > max_quiet_time {
|
||||
debug!("ending connection due to lack of client ping response");
|
||||
break;
|
||||
}
|
||||
// Send a ping
|
||||
ws_stream.send(Message::Ping(Vec::new())).await.ok();
|
||||
},
|
||||
Some(notice_msg) = notice_rx.recv() => {
|
||||
ws_stream.send(make_notice_message(notice_msg)).await.ok();
|
||||
},
|
||||
Some(query_result) = query_rx.recv() => {
|
||||
// database informed us of a query result we asked for
|
||||
let subesc = query_result.sub_id.replace('"', "");
|
||||
if query_result.event == "EOSE" {
|
||||
let send_str = format!("[\"EOSE\",\"{}\"]", subesc);
|
||||
ws_stream.send(Message::Text(send_str)).await.ok();
|
||||
} else {
|
||||
client_received_event_count += 1;
|
||||
// send a result
|
||||
let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
|
||||
ws_stream.send(Message::Text(send_str)).await.ok();
|
||||
}
|
||||
},
|
||||
// TODO: consider logging the LaggedRecv error
|
||||
Ok(global_event) = bcast_rx.recv() => {
|
||||
// an event has been broadcast to all clients
|
||||
// first check if there is a subscription for this event.
|
||||
for (s, sub) in conn.subscriptions() {
|
||||
if !sub.interested_in_event(&global_event) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// TODO: serialize at broadcast time, instead of
|
||||
// once for each consumer.
|
||||
if let Ok(event_str) = serde_json::to_string(&global_event) {
|
||||
trace!("sub match for client: {}, sub: {:?}, event: {:?}",
|
||||
cid, s,
|
||||
global_event.get_event_id_prefix());
|
||||
// create an event response and send it
|
||||
let subesc = s.replace('"', "");
|
||||
ws_stream.send(Message::Text(format!("[\"EVENT\",\"{}\",{}]", subesc, event_str))).await.ok();
|
||||
} else {
|
||||
warn!("could not serialize event: {:?}", global_event.get_event_id_prefix());
|
||||
}
|
||||
}
|
||||
},
|
||||
ws_next = ws_stream.next() => {
|
||||
// update most recent message time for client
|
||||
last_message_time = Instant::now();
|
||||
// Consume text messages from the client, parse into Nostr messages.
|
||||
let nostr_msg = match ws_next {
|
||||
Some(Ok(Message::Text(m))) => {
|
||||
convert_to_msg(m, settings.limits.max_event_bytes)
|
||||
},
|
||||
Some(Ok(Message::Binary(_))) => {
|
||||
ws_stream.send(
|
||||
make_notice_message(Notice::message("binary messages are not accepted".into()))).await.ok();
|
||||
continue;
|
||||
},
|
||||
Some(Ok(Message::Ping(_) | Message::Pong(_))) => {
|
||||
// got a ping/pong; ignore it. tungstenite will
|
||||
// send responses automatically.
|
||||
continue;
|
||||
},
|
||||
Some(Err(WsError::Capacity(MessageTooLong{size, max_size}))) => {
|
||||
ws_stream.send(
|
||||
make_notice_message(Notice::message(format!("message too large ({} > {})",size, max_size)))).await.ok();
|
||||
continue;
|
||||
},
|
||||
None |
|
||||
Some(Ok(Message::Close(_)) |
|
||||
Err(WsError::AlreadyClosed | WsError::ConnectionClosed |
|
||||
WsError::Protocol(tungstenite::error::ProtocolError::ResetWithoutClosingHandshake)))
|
||||
=> {
|
||||
debug!("websocket close from client (cid: {}, ip: {:?})",cid, conn.ip());
|
||||
break;
|
||||
},
|
||||
Some(Err(WsError::Io(e))) => {
|
||||
// IO errors are considered fatal
|
||||
warn!("IO error (cid: {}, ip: {:?}): {:?}", cid, conn.ip(), e);
|
||||
break;
|
||||
}
|
||||
x => {
|
||||
// default condition on error is to close the client connection
|
||||
info!("unknown error (cid: {}, ip: {:?}): {:?} (closing conn)", cid, conn.ip(), x);
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
// convert ws_next into proto_next
|
||||
match nostr_msg {
|
||||
Ok(NostrMessage::EventMsg(ec)) => {
|
||||
// An EventCmd needs to be validated to be converted into an Event
|
||||
// handle each type of message
|
||||
let evid = ec.event_id().to_owned();
|
||||
let parsed: Result<Event> = Result::<Event>::from(ec);
|
||||
match parsed {
|
||||
Ok(e) => {
|
||||
let id_prefix: String = e.id.chars().take(8).collect();
|
||||
debug!("successfully parsed/validated event: {:?} (cid: {})", id_prefix, cid);
|
||||
// check if the event is too far in the future.
|
||||
if e.is_valid_timestamp(settings.options.reject_future_seconds) {
|
||||
// Write this to the database.
|
||||
let submit_event = SubmittedEvent { event: e.clone(), notice_tx: notice_tx.clone() };
|
||||
event_tx.send(submit_event).await.ok();
|
||||
client_published_event_count += 1;
|
||||
} else {
|
||||
info!("client: {} sent a far future-dated event", cid);
|
||||
if let Some(fut_sec) = settings.options.reject_future_seconds {
|
||||
let msg = format!("The event created_at field is out of the acceptable range (+{}sec) for this relay.",fut_sec);
|
||||
let notice = Notice::invalid(e.id, &msg);
|
||||
ws_stream.send(make_notice_message(notice)).await.ok();
|
||||
}
|
||||
}
|
||||
},
|
||||
Err(e) => {
|
||||
info!("client sent an invalid event (cid: {})", cid);
|
||||
ws_stream.send(make_notice_message(Notice::invalid(evid, &format!("{}", e)))).await.ok();
|
||||
}
|
||||
}
|
||||
},
|
||||
Ok(NostrMessage::SubMsg(s)) => {
|
||||
debug!("subscription requested (cid: {}, sub: {:?})", cid, s.id);
|
||||
// subscription handling consists of:
|
||||
// * check for rate limits
|
||||
// * registering the subscription so future events can be matched
|
||||
// * making a channel to cancel the request later
|
||||
// * sending a request for a SQL query
|
||||
// Do nothing if the sub already exists.
|
||||
if !conn.has_subscription(&s) {
|
||||
if let Some(ref lim) = sub_lim_opt {
|
||||
lim.until_ready_with_jitter(jitter).await;
|
||||
}
|
||||
let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
|
||||
match conn.subscribe(s.clone()) {
|
||||
Ok(()) => {
|
||||
// when we insert, if there was a previous query running with the same name, cancel it.
|
||||
if let Some(previous_query) = running_queries.insert(s.id.to_owned(), abandon_query_tx) {
|
||||
previous_query.send(()).ok();
|
||||
}
|
||||
// start a database query. this spawns a blocking database query on a worker thread.
|
||||
db::db_query(s, cid.to_owned(), pool.clone(), query_tx.clone(), abandon_query_rx).await;
|
||||
},
|
||||
Err(e) => {
|
||||
info!("Subscription error: {} (cid: {}, sub: {:?})", e, cid, s.id);
|
||||
ws_stream.send(make_notice_message(Notice::message(format!("Subscription error: {}", e)))).await.ok();
|
||||
}
|
||||
}
|
||||
} else {
|
||||
info!("client sent duplicate subscription, ignoring (cid: {}, sub: {:?})", cid, s.id);
|
||||
}
|
||||
},
|
||||
Ok(NostrMessage::CloseMsg(cc)) => {
|
||||
// closing a request simply removes the subscription.
|
||||
let parsed: Result<Close> = Result::<Close>::from(cc);
|
||||
if let Ok(c) = parsed {
|
||||
// check if a query is currently
|
||||
// running, and remove it if so.
|
||||
let stop_tx = running_queries.remove(&c.id);
|
||||
if let Some(tx) = stop_tx {
|
||||
tx.send(()).ok();
|
||||
}
|
||||
// stop checking new events against
|
||||
// the subscription
|
||||
conn.unsubscribe(&c);
|
||||
} else {
|
||||
info!("invalid command ignored");
|
||||
ws_stream.send(make_notice_message(Notice::message("could not parse command".into()))).await.ok();
|
||||
}
|
||||
},
|
||||
Err(Error::ConnError) => {
|
||||
debug!("got connection close/error, disconnecting cid: {}, ip: {:?}",cid, conn.ip());
|
||||
break;
|
||||
}
|
||||
Err(Error::EventMaxLengthError(s)) => {
|
||||
info!("client sent event larger ({} bytes) than max size (cid: {})", s, cid);
|
||||
ws_stream.send(make_notice_message(Notice::message("event exceeded max size".into()))).await.ok();
|
||||
},
|
||||
Err(Error::ProtoParseError) => {
|
||||
info!("client sent event that could not be parsed (cid: {})", cid);
|
||||
ws_stream.send(make_notice_message(Notice::message("could not parse command".into()))).await.ok();
|
||||
},
|
||||
Err(e) => {
|
||||
info!("got non-fatal error from client (cid: {}, error: {:?})", cid, e);
|
||||
},
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
// connection cleanup - ensure any still running queries are terminated.
|
||||
for (_, stop_tx) in running_queries {
|
||||
stop_tx.send(()).ok();
|
||||
}
|
||||
info!(
|
||||
"stopping client connection (cid: {}, ip: {:?}, sent: {} events, recv: {} events, connected: {:?})",
|
||||
cid,
|
||||
conn.ip(),
|
||||
client_published_event_count,
|
||||
client_received_event_count,
|
||||
orig_start.elapsed()
|
||||
);
|
||||
}
|
@@ -1,10 +1,14 @@
|
||||
//! Subscription and filter parsing
|
||||
use crate::error::Result;
|
||||
use crate::event::Event;
|
||||
use serde::de::Unexpected;
|
||||
use serde::{Deserialize, Deserializer, Serialize};
|
||||
use serde_json::Value;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::HashSet;
|
||||
|
||||
/// Subscription identifier and set of request filters
|
||||
#[derive(Serialize, PartialEq, Debug, Clone)]
|
||||
#[derive(Serialize, PartialEq, Eq, Debug, Clone)]
|
||||
pub struct Subscription {
|
||||
pub id: String,
|
||||
pub filters: Vec<ReqFilter>,
|
||||
@@ -15,23 +19,128 @@ pub struct Subscription {
|
||||
/// Corresponds to client-provided subscription request elements. Any
|
||||
/// element can be present if it should be used in filtering, or
|
||||
/// absent ([`None`]) if it should be ignored.
|
||||
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
#[derive(Serialize, PartialEq, Eq, Debug, Clone)]
|
||||
pub struct ReqFilter {
|
||||
/// Event hash
|
||||
pub id: Option<String>,
|
||||
/// Event kind
|
||||
pub kind: Option<u64>,
|
||||
/// Referenced event hash
|
||||
#[serde(rename = "#e")]
|
||||
pub event: Option<String>,
|
||||
/// Referenced public key for a petname
|
||||
#[serde(rename = "#p")]
|
||||
pub pubkey: Option<String>,
|
||||
/// Event hashes
|
||||
pub ids: Option<Vec<String>>,
|
||||
/// Event kinds
|
||||
pub kinds: Option<Vec<u64>>,
|
||||
/// Events published after this time
|
||||
pub since: Option<u64>,
|
||||
/// Events published before this time
|
||||
pub until: Option<u64>,
|
||||
/// List of author public keys
|
||||
pub authors: Option<Vec<String>>,
|
||||
/// Limit number of results
|
||||
pub limit: Option<u64>,
|
||||
/// Set of tags
|
||||
#[serde(skip)]
|
||||
pub tags: Option<HashMap<char, HashSet<String>>>,
|
||||
/// Force no matches due to malformed data
|
||||
// we can't represent it in the req filter, so we don't want to
|
||||
// erroneously match. This basically indicates the req tried to
|
||||
// do something invalid.
|
||||
pub force_no_match: bool,
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for ReqFilter {
|
||||
fn deserialize<D>(deserializer: D) -> Result<ReqFilter, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let received: Value = Deserialize::deserialize(deserializer)?;
|
||||
let filter = received.as_object().ok_or_else(|| {
|
||||
serde::de::Error::invalid_type(
|
||||
Unexpected::Other("reqfilter is not an object"),
|
||||
&"a json object",
|
||||
)
|
||||
})?;
|
||||
let mut rf = ReqFilter {
|
||||
ids: None,
|
||||
kinds: None,
|
||||
since: None,
|
||||
until: None,
|
||||
authors: None,
|
||||
limit: None,
|
||||
tags: None,
|
||||
force_no_match: false,
|
||||
};
|
||||
let empty_string = "".into();
|
||||
let mut ts = None;
|
||||
// iterate through each key, and assign values that exist
|
||||
for (key, val) in filter.into_iter() {
|
||||
// ids
|
||||
if key == "ids" {
|
||||
let raw_ids: Option<Vec<String>> = Deserialize::deserialize(val).ok();
|
||||
if let Some(a) = raw_ids.as_ref() {
|
||||
if a.contains(&empty_string) {
|
||||
return Err(serde::de::Error::invalid_type(
|
||||
Unexpected::Other("prefix matches must not be empty strings"),
|
||||
&"a json object"));
|
||||
}
|
||||
}
|
||||
rf.ids = raw_ids;
|
||||
} else if key == "kinds" {
|
||||
rf.kinds = Deserialize::deserialize(val).ok();
|
||||
} else if key == "since" {
|
||||
rf.since = Deserialize::deserialize(val).ok();
|
||||
} else if key == "until" {
|
||||
rf.until = Deserialize::deserialize(val).ok();
|
||||
} else if key == "limit" {
|
||||
rf.limit = Deserialize::deserialize(val).ok();
|
||||
} else if key == "authors" {
|
||||
let raw_authors: Option<Vec<String>> = Deserialize::deserialize(val).ok();
|
||||
if let Some(a) = raw_authors.as_ref() {
|
||||
if a.contains(&empty_string) {
|
||||
return Err(serde::de::Error::invalid_type(
|
||||
Unexpected::Other("prefix matches must not be empty strings"),
|
||||
&"a json object"));
|
||||
}
|
||||
}
|
||||
rf.authors = raw_authors;
|
||||
} else if key.starts_with('#') && key.len() > 1 && val.is_array() {
|
||||
if let Some(tag_search) = tag_search_char_from_filter(key) {
|
||||
if ts.is_none() {
|
||||
// Initialize the tag map if necessary
|
||||
ts = Some(HashMap::new());
|
||||
}
|
||||
if let Some(m) = ts.as_mut() {
|
||||
let tag_vals: Option<Vec<String>> = Deserialize::deserialize(val).ok();
|
||||
if let Some(v) = tag_vals {
|
||||
let hs = HashSet::from_iter(v.into_iter());
|
||||
m.insert(tag_search.to_owned(), hs);
|
||||
}
|
||||
};
|
||||
} else {
|
||||
// multi-character tag search name; force this filter to never match
|
||||
rf.force_no_match = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
rf.tags = ts;
|
||||
Ok(rf)
|
||||
}
|
||||
}
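// A filter such as {"kinds":[1,7],"authors":["abc"],"#p":["def"],"since":1000}
// (values hypothetical) parses with the "#p" entry landing in the `tags` map.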
|
||||
|
||||
/// Attempt to form a single-char identifier from a tag search filter
|
||||
fn tag_search_char_from_filter(tagname: &str) -> Option<char> {
|
||||
let tagname_nohash = &tagname[1..];
|
||||
// We return the tag character if and only if the tagname consists
|
||||
// of a single char.
|
||||
let mut tagnamechars = tagname_nohash.chars();
|
||||
let firstchar = tagnamechars.next();
|
||||
match firstchar {
|
||||
Some(_) => {
|
||||
// check second char
|
||||
if tagnamechars.next().is_none() {
|
||||
firstchar
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
None => None,
|
||||
}
|
||||
}
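// e.g. "#e" yields Some('e'), while "#ab" yields None (multi-char tag names are rejected).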
|
||||
|
||||
impl<'de> Deserialize<'de> for Subscription {
|
||||
@@ -41,7 +150,7 @@ impl<'de> Deserialize<'de> for Subscription {
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let mut v: serde_json::Value = Deserialize::deserialize(deserializer)?;
|
||||
let mut v: Value = Deserialize::deserialize(deserializer)?;
|
||||
// this should be a 3-or-more element array.
|
||||
// verify the first element is a String, REQ
|
||||
// get the subscription from the second element.
|
||||
@@ -76,6 +185,7 @@ impl<'de> Deserialize<'de> for Subscription {
|
||||
for fv in i {
|
||||
let f: ReqFilter = serde_json::from_value(fv.take())
|
||||
.map_err(|_| serde::de::Error::custom("could not parse filter"))?;
|
||||
// create indexes
|
||||
filters.push(f);
|
||||
}
|
||||
Ok(Subscription {
|
||||
@@ -102,46 +212,76 @@ impl Subscription {
|
||||
}
|
||||
}
|
||||
|
||||
impl ReqFilter {
|
||||
/// Check for a match within the authors list.
|
||||
// TODO: Ambiguity; what if the array is empty? Should we
|
||||
// consider that the same as null?
|
||||
fn authors_match(&self, event: &Event) -> bool {
|
||||
self.authors
|
||||
.as_ref()
|
||||
.map(|vs| vs.contains(&event.pubkey.to_owned()))
|
||||
.unwrap_or(true)
|
||||
fn prefix_match(prefixes: &[String], target: &str) -> bool {
|
||||
for prefix in prefixes {
|
||||
if target.starts_with(prefix) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
/// Check if this filter either matches, or does not care about the event tags.
|
||||
fn event_match(&self, event: &Event) -> bool {
|
||||
self.event
|
||||
// none matched
|
||||
false
|
||||
}
|
||||
|
||||
impl ReqFilter {
|
||||
fn ids_match(&self, event: &Event) -> bool {
|
||||
self.ids
|
||||
.as_ref()
|
||||
.map(|t| event.event_tag_match(t))
|
||||
.map(|vs| prefix_match(vs, &event.id))
|
||||
.unwrap_or(true)
|
||||
}
|
||||
|
||||
/// Check if this filter either matches, or does not care about
|
||||
/// the pubkey/petname tags.
|
||||
fn pubkey_match(&self, event: &Event) -> bool {
|
||||
self.pubkey
|
||||
fn authors_match(&self, event: &Event) -> bool {
|
||||
self.authors
|
||||
.as_ref()
|
||||
.map(|t| event.pubkey_tag_match(t))
|
||||
.map(|vs| prefix_match(vs, &event.pubkey))
|
||||
.unwrap_or(true)
|
||||
}
|
||||
|
||||
fn delegated_authors_match(&self, event: &Event) -> bool {
|
||||
if let Some(delegated_pubkey) = &event.delegated_by {
|
||||
self.authors
|
||||
.as_ref()
|
||||
.map(|vs| prefix_match(vs, delegated_pubkey))
|
||||
.unwrap_or(true)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
fn tag_match(&self, event: &Event) -> bool {
|
||||
// get the hashset from the filter.
|
||||
if let Some(map) = &self.tags {
|
||||
for (key, val) in map.iter() {
|
||||
let tag_match = event.generic_tag_val_intersect(*key, val);
|
||||
// if there is no match for this tag, the match fails.
|
||||
if !tag_match {
|
||||
return false;
|
||||
}
|
||||
// if there was a match, we move on to the next one.
|
||||
}
|
||||
}
|
||||
// if the tag map is empty, the match succeeds (there was no filter)
|
||||
true
|
||||
}
|
||||
|
||||
/// Check if this filter either matches, or does not care about the kind.
|
||||
fn kind_match(&self, kind: u64) -> bool {
|
||||
self.kind.map(|v| v == kind).unwrap_or(true)
|
||||
self.kinds
|
||||
.as_ref()
|
||||
.map(|ks| ks.contains(&kind))
|
||||
.unwrap_or(true)
|
||||
}
|
||||
|
||||
/// Determine if all populated fields in this filter match the provided event.
|
||||
pub fn interested_in_event(&self, event: &Event) -> bool {
|
||||
self.id.as_ref().map(|v| v == &event.id).unwrap_or(true)
|
||||
// self.id.as_ref().map(|v| v == &event.id).unwrap_or(true)
|
||||
self.ids_match(event)
|
||||
&& self.since.map(|t| event.created_at > t).unwrap_or(true)
|
||||
&& self.until.map(|t| event.created_at < t).unwrap_or(true)
|
||||
&& self.kind_match(event.kind)
|
||||
&& self.authors_match(event)
|
||||
&& self.pubkey_match(event)
|
||||
&& self.event_match(event)
|
||||
&& (self.authors_match(event) || self.delegated_authors_match(event))
|
||||
&& self.tag_match(event)
|
||||
&& !self.force_no_match
|
||||
}
|
||||
}
|
||||
|
||||
@@ -172,54 +312,164 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_filter() {
|
||||
// unrecognized field in filter
|
||||
let raw_json = "[\"REQ\",\"some-id\",{\"foo\": 3}]";
|
||||
fn req_empty_authors_prefix() {
|
||||
let raw_json = "[\"REQ\",\"some-id\",{\"authors\": [\"\"]}]";
|
||||
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn req_empty_ids_prefix() {
|
||||
let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\"]}]";
|
||||
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn req_empty_ids_prefix_mixed() {
|
||||
let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\",\"aaa\"]}]";
|
||||
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn legacy_filter() {
|
||||
// legacy field in filter
|
||||
let raw_json = "[\"REQ\",\"some-id\",{\"kind\": 3}]";
|
||||
assert!(serde_json::from_str::<Subscription>(raw_json).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn author_filter() -> Result<()> {
|
||||
let raw_json = "[\"REQ\",\"some-id\",{\"author\": \"test-author-id\"}]";
|
||||
let raw_json = r#"["REQ","some-id",{"authors": ["test-author-id"]}]"#;
|
||||
let s: Subscription = serde_json::from_str(raw_json)?;
|
||||
assert_eq!(s.id, "some-id");
|
||||
assert_eq!(s.filters.len(), 1);
|
||||
let first_filter = s.filters.get(0).unwrap();
|
||||
assert_eq!(first_filter.author, Some("test-author-id".to_owned()));
|
||||
assert_eq!(
|
||||
first_filter.authors,
|
||||
Some(vec!("test-author-id".to_owned()))
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn interest_author_prefix_match() -> Result<()> {
|
||||
// subscription with a filter for ID
|
||||
let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"authors": ["abc"]}]"#)?;
|
||||
let e = Event {
|
||||
id: "foo".to_owned(),
|
||||
pubkey: "abcd".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 0,
|
||||
kind: 0,
|
||||
tags: Vec::new(),
|
||||
content: "".to_owned(),
|
||||
sig: "".to_owned(),
|
||||
tagidx: None,
|
||||
};
|
||||
assert!(s.interested_in_event(&e));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn interest_id_prefix_match() -> Result<()> {
|
||||
// subscription with a filter for ID
|
||||
let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"]}]"#)?;
|
||||
let e = Event {
|
||||
id: "abcd".to_owned(),
|
||||
pubkey: "".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 0,
|
||||
kind: 0,
|
||||
tags: Vec::new(),
|
||||
content: "".to_owned(),
|
||||
sig: "".to_owned(),
|
||||
tagidx: None,
|
||||
};
|
||||
assert!(s.interested_in_event(&e));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn interest_id_nomatch() -> Result<()> {
|
||||
// subscription with a filter for ID
|
||||
let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"id":"abc"}]"#)?;
|
||||
let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"ids": ["xyz"]}]"#)?;
|
||||
let e = Event {
|
||||
id: "abcde".to_owned(),
|
||||
pubkey: "".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 0,
|
||||
kind: 0,
|
||||
tags: Vec::new(),
|
||||
content: "".to_owned(),
|
||||
sig: "".to_owned(),
|
||||
tagidx: None,
|
||||
};
|
||||
assert_eq!(s.interested_in_event(&e), false);
|
||||
assert!(!s.interested_in_event(&e));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn interest_until() -> Result<()> {
|
||||
// subscription with a filter for ID and time
|
||||
let s: Subscription =
|
||||
serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "until": 1000}]"#)?;
|
||||
let e = Event {
|
||||
id: "abc".to_owned(),
|
||||
pubkey: "".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 50,
|
||||
kind: 0,
|
||||
tags: Vec::new(),
|
||||
content: "".to_owned(),
|
||||
sig: "".to_owned(),
|
||||
tagidx: None,
|
||||
};
|
||||
assert!(s.interested_in_event(&e));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn interest_range() -> Result<()> {
|
||||
// subscription with a filter for ID and time
|
||||
let s_in: Subscription =
|
||||
serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 100, "until": 200}]"#)?;
|
||||
let s_before: Subscription =
|
||||
serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 100, "until": 140}]"#)?;
|
||||
let s_after: Subscription =
|
||||
serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 160, "until": 200}]"#)?;
|
||||
let e = Event {
|
||||
id: "abc".to_owned(),
|
||||
pubkey: "".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 150,
|
||||
kind: 0,
|
||||
tags: Vec::new(),
|
||||
content: "".to_owned(),
|
||||
sig: "".to_owned(),
|
||||
tagidx: None,
|
||||
};
|
||||
assert!(s_in.interested_in_event(&e));
|
||||
assert!(!s_before.interested_in_event(&e));
|
||||
assert!(!s_after.interested_in_event(&e));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn interest_time_and_id() -> Result<()> {
|
||||
// subscription with a filter for ID and time
|
||||
let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"id":"abc", "since": 1000}]"#)?;
|
||||
let s: Subscription =
|
||||
serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 1000}]"#)?;
|
||||
let e = Event {
|
||||
id: "abc".to_owned(),
|
||||
pubkey: "".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 50,
|
||||
kind: 0,
|
||||
tags: Vec::new(),
|
||||
content: "".to_owned(),
|
||||
sig: "".to_owned(),
|
||||
tagidx: None,
|
||||
};
|
||||
assert_eq!(s.interested_in_event(&e), false);
|
||||
assert!(!s.interested_in_event(&e));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -230,13 +480,15 @@ mod tests {
|
||||
let e = Event {
|
||||
id: "abc".to_owned(),
|
||||
pubkey: "".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 1001,
|
||||
kind: 0,
|
||||
tags: Vec::new(),
|
||||
content: "".to_owned(),
|
||||
sig: "".to_owned(),
|
||||
tagidx: None,
|
||||
};
|
||||
assert_eq!(s.interested_in_event(&e), true);
|
||||
assert!(s.interested_in_event(&e));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -247,13 +499,15 @@ mod tests {
|
||||
let e = Event {
|
||||
id: "abc".to_owned(),
|
||||
pubkey: "".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 0,
|
||||
kind: 0,
|
||||
tags: Vec::new(),
|
||||
content: "".to_owned(),
|
||||
sig: "".to_owned(),
|
||||
tagidx: None,
|
||||
};
|
||||
assert_eq!(s.interested_in_event(&e), true);
|
||||
assert!(s.interested_in_event(&e));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -264,30 +518,34 @@ mod tests {
|
||||
let e = Event {
|
||||
id: "123".to_owned(),
|
||||
pubkey: "abc".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 0,
|
||||
kind: 0,
|
||||
tags: Vec::new(),
|
||||
content: "".to_owned(),
|
||||
sig: "".to_owned(),
|
||||
tagidx: None,
|
||||
};
|
||||
assert_eq!(s.interested_in_event(&e), true);
|
||||
assert!(s.interested_in_event(&e));
|
||||
Ok(())
|
||||
}
|
||||
#[test]
|
||||
|
||||
#[test]
|
||||
fn authors_multi_pubkey() -> Result<()> {
|
||||
// check for any of a set of authors, against the pubkey
|
||||
let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"authors":["abc", "bcd"]}]"#)?;
|
||||
let e = Event {
|
||||
id: "123".to_owned(),
|
||||
pubkey: "bcd".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 0,
|
||||
kind: 0,
|
||||
tags: Vec::new(),
|
||||
content: "".to_owned(),
|
||||
sig: "".to_owned(),
|
||||
tagidx: None,
|
||||
};
|
||||
assert_eq!(s.interested_in_event(&e), true);
|
||||
assert!(s.interested_in_event(&e));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -298,13 +556,15 @@ mod tests {
|
||||
let e = Event {
|
||||
id: "123".to_owned(),
|
||||
pubkey: "xyz".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 0,
|
||||
kind: 0,
|
||||
tags: Vec::new(),
|
||||
content: "".to_owned(),
|
||||
sig: "".to_owned(),
|
||||
tagidx: None,
|
||||
};
|
||||
assert_eq!(s.interested_in_event(&e), false);
|
||||
assert!(!s.interested_in_event(&e));
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
33
src/utils.rs
Normal file
@@ -0,0 +1,33 @@
|
||||
//! Common utility functions
|
||||
use std::time::SystemTime;
|
||||
|
||||
/// Seconds since 1970.
|
||||
pub fn unix_time() -> u64 {
|
||||
SystemTime::now()
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
.map(|x| x.as_secs())
|
||||
.unwrap_or(0)
|
||||
}
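// Falls back to 0 if the system clock reports a time before the Unix epoch.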
|
||||
|
||||
/// Check if a string contains only hex characters.
|
||||
pub fn is_hex(s: &str) -> bool {
|
||||
s.chars().all(|x| char::is_ascii_hexdigit(&x))
|
||||
}
|
||||
|
||||
/// Check if a string contains only lower-case hex chars.
|
||||
pub fn is_lower_hex(s: &str) -> bool {
|
||||
s.chars().all(|x| {
|
||||
(char::is_ascii_lowercase(&x) || char::is_ascii_digit(&x)) && char::is_ascii_hexdigit(&x)
|
||||
})
|
||||
}
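// e.g. is_lower_hex("abcd0123") == true, is_lower_hex("ABCD0123") == false; note that
// both helpers treat the empty string as (vacuously) hex.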
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn lower_hex() {
|
||||
let hexstr = "abcd0123";
|
||||
assert_eq!(is_lower_hex(hexstr), true);
|
||||
}
|
||||
}
|
110
tests/common/mod.rs
Normal file
@@ -0,0 +1,110 @@
|
||||
use anyhow::{anyhow, Result};
|
||||
use nostr_rs_relay::config;
|
||||
use nostr_rs_relay::server::start_server;
|
||||
//use http::{Request, Response};
|
||||
use hyper::{Client, StatusCode, Uri};
|
||||
use std::net::TcpListener;
|
||||
use std::sync::atomic::{AtomicU16, Ordering};
|
||||
use std::sync::mpsc as syncmpsc;
|
||||
use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
|
||||
use std::thread;
|
||||
use std::thread::JoinHandle;
|
||||
use std::time::Duration;
|
||||
use tracing::{debug, info};
|
||||
|
||||
pub struct Relay {
|
||||
pub port: u16,
|
||||
pub handle: JoinHandle<()>,
|
||||
pub shutdown_tx: MpscSender<()>,
|
||||
}
|
||||
|
||||
pub fn start_relay() -> Result<Relay> {
|
||||
// setup tracing
|
||||
let _trace_sub = tracing_subscriber::fmt::try_init();
|
||||
info!("Starting a new relay");
|
||||
// replace default settings
|
||||
let mut settings = config::Settings::default();
|
||||
// identify open port
|
||||
info!("Checking for address...");
|
||||
let port = get_available_port().unwrap();
|
||||
info!("Found open port: {}", port);
|
||||
// bind to local interface only
|
||||
settings.network.address = "127.0.0.1".to_owned();
|
||||
settings.network.port = port;
|
||||
// create an in-memory DB with multiple readers
|
||||
settings.database.in_memory = true;
|
||||
settings.database.min_conn = 4;
|
||||
settings.database.max_conn = 8;
|
||||
let (shutdown_tx, shutdown_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
|
||||
let handle = thread::spawn(|| {
|
||||
// server will block the thread it is run on.
|
||||
let _ = start_server(settings, shutdown_rx);
|
||||
});
|
||||
// how do we know the relay has finished starting up?
|
||||
Ok(Relay {
|
||||
port,
|
||||
handle,
|
||||
shutdown_tx,
|
||||
})
|
||||
}
|
||||
|
||||
// check if the server is healthy via HTTP request
|
||||
async fn server_ready(relay: &Relay) -> Result<bool> {
|
||||
let uri: String = format!("http://127.0.0.1:{}/", relay.port);
|
||||
let client = Client::new();
|
||||
let uri: Uri = uri.parse().unwrap();
|
||||
let res = client.get(uri).await?;
|
||||
Ok(res.status() == StatusCode::OK)
|
||||
}
|
||||
|
||||
pub async fn wait_for_healthy_relay(relay: &Relay) -> Result<()> {
|
||||
// TODO: maximum time to wait for server to become healthy.
|
||||
// give it a little time to start up before we start polling
|
||||
tokio::time::sleep(Duration::from_millis(10)).await;
|
||||
loop {
|
||||
let server_check = server_ready(relay).await;
|
||||
match server_check {
|
||||
Ok(true) => {
|
||||
// server responded with 200-OK.
|
||||
break;
|
||||
}
|
||||
Ok(false) => {
|
||||
// server responded with an error, we're done.
|
||||
return Err(anyhow!("Got non-200-OK from relay"));
|
||||
}
|
||||
Err(_) => {
|
||||
// server is not yet ready, probably connection refused...
|
||||
debug!("Relay not ready, will try again...");
|
||||
tokio::time::sleep(Duration::from_millis(10)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
info!("relay is ready");
|
||||
Ok(())
|
||||
// simple message sent to web browsers
|
||||
//let mut request = Request::builder()
|
||||
// .uri("https://www.rust-lang.org/")
|
||||
// .header("User-Agent", "my-awesome-agent/1.0");
|
||||
}
|
||||
|
||||
// from https://elliotekj.com/posts/2017/07/25/find-available-tcp-port-rust/
|
||||
// This needed some modification; if multiple tasks all ask for open ports, they will tend to get the same one.
|
||||
// Instead we hand out candidate ports incrementally from a global counter.
|
||||
|
||||
static PORT_COUNTER: AtomicU16 = AtomicU16::new(4030);
|
||||
|
||||
fn get_available_port() -> Option<u16> {
|
||||
let startsearch = PORT_COUNTER.fetch_add(10, Ordering::SeqCst);
|
||||
if startsearch >= 20000 {
|
||||
// wrap around
|
||||
PORT_COUNTER.store(4030, Ordering::Relaxed);
|
||||
}
|
||||
(startsearch..20000).find(|port| port_is_available(*port))
|
||||
}
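// Each relay under test scans a fresh 10-port window starting from the shared
// counter, so concurrent tests are unlikely to race for the same port.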
|
||||
pub fn port_is_available(port: u16) -> bool {
|
||||
info!("checking on port {}", port);
|
||||
match TcpListener::bind(("127.0.0.1", port)) {
|
||||
Ok(_) => true,
|
||||
Err(_) => false,
|
||||
}
|
||||
}
|
47
tests/integration_test.rs
Normal file
@@ -0,0 +1,47 @@
|
||||
use anyhow::Result;
|
||||
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
mod common;
|
||||
|
||||
#[tokio::test]
|
||||
async fn start_and_stop() -> Result<()> {
|
||||
// this will be the common pattern for acquiring a new relay:
|
||||
// start a fresh relay, on a port to-be-provided back to us:
|
||||
let relay = common::start_relay()?;
|
||||
// wait for the relay's webserver to start up and deliver a page:
|
||||
common::wait_for_healthy_relay(&relay).await?;
|
||||
let port = relay.port;
|
||||
// just make sure we can startup and shut down.
|
||||
// if we send a shutdown message before the server is listening,
|
||||
// we will get a SendError. Keep sending until someone is
|
||||
// listening.
|
||||
loop {
|
||||
let shutdown_res = relay.shutdown_tx.send(());
|
||||
match shutdown_res {
|
||||
Ok(()) => {
|
||||
break;
|
||||
}
|
||||
Err(_) => {
|
||||
thread::sleep(Duration::from_millis(100));
|
||||
}
|
||||
}
|
||||
}
|
||||
// wait for relay to shutdown
|
||||
let thread_join = relay.handle.join();
|
||||
assert!(thread_join.is_ok());
|
||||
// assert that port is now available.
|
||||
assert!(common::port_is_available(port));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn relay_home_page() -> Result<()> {
|
||||
// get a relay and wait for startup...
|
||||
let relay = common::start_relay()?;
|
||||
common::wait_for_healthy_relay(&relay).await?;
|
||||
// tell relay to shutdown
|
||||
let _res = relay.shutdown_tx.send(());
|
||||
Ok(())
|
||||
}
|