Mirror of https://github.com/scsibug/nostr-rs-relay.git (synced 2025-09-01 03:40:46 -04:00)
Compare commits
183 Commits
SHA1:
6a8c4ed1b5, 966c853700, 65fd0ed08b, 0b51675b38, 2e22334631, cb2ac4bf0f,
38dc7789dc, ce0e00ffb3, 3e4ae4aeec, c6a8807485, 8137b6211c, 29effaae23,
e5074f2e46, 4fd7643907, 1e1ec69175, e08647867c, ae0f7171ed, 4f1a912f36,
95748647f0, 25480e837f, b80b54cd9d, 8ea732cbe5, 0f68c4e5c2, dab2cd5792,
f411aa6fc2, d31bbda087, 5917bc53b2, 91177c61a1, 53c2a8051c, 168cf513ac,
ea204761c9, c270ae1434, 64bd983cb6, 1c153bc784, dc11d9a619, cd1557787b,
86bb7aeb9a, ce37fc1a2d, 2cfd384339, 8c013107f9, 64a4466d30, 1596c23eb4,
129badd4e1, 6f7c080180, af92561ef6, d833a3e40d, 462eb46642, cf144d503d,
fb8375aef2, 88ac31b549, 677b7d39e9, b24d2f9aaa, 7a3899d852, 818108b793,
d10348f7e1, 8598e443d8, 43222d44e5, 7c1516c4fb, 0c72053a49, 3f32ff67ab,
0b9778d6ca, 9be04120c7, cc06167e06, b6e33f044f, 1b2c6f9fca, 0d8d39ad22,
0e851d4f71, 3c880b2f49, 7a4c9266ec, e8557d421b, 7ca9c864f2, 838aafd079,
e554b10ac2, b0bfaa48fc, 2e9b1b6ba7, 4d9012d94c, ffe7aac066, f9695bd0a9,
7c4bf5cc8f, e2de162931, 4f606615eb, 84a58ebbcd, c48e45686d, bbe359364a,
9e9c494367, 5fa24bc9f1, 4de7490d97, d0f63dc66e, 06078648c8, cc0fcc5d66,
dfb2096653, 486508d192, 84b43c144b, 110500bb46, 83f6b11de7, 6d1244434b,
5a91419d34, 7adc5c9af7, 9dd4571bee, 9db5a26b9c, ac345b5744, 675662c7fb,
505b0cb71f, e8aa450802, 5a8860bb09, 11e43eccf9, 50577b2dfa, a6cb6f8486,
ae5bf98d87, 1cf9d719f0, 311f4b5283, 14b5a51e3a, 8ecce3f566, caffbbbede,
81045ad3d0, 72f8a1aa5c, 274c61bb72, 6eeefbcc4c, 3e8adf978f, 2af5f9fbe8,
2739e49362, f9693f7ac3, 8a63d88b0b, a4df9445b6, 92da9d71f8, 6633f8b472,
93dfed0a87, bef7ca7e27, a98708ba47, ccf9b8d47b, 8fa58de49a, 480c5e4e58,
5bd00f9107, 36b9f628c7, baeb77af99, 29b1e8ce58, 786a354776, 4fa8616c73,
74802522c2, 9ce5057af8, 217429f538, 62a9548c27, c24dce8177, 3503cf05ed,
8738e5baa9, 78da92ccca, 72f1c19b21, 283967f8cc, 08b011ad07, 2b03f11e5e,
e48bae10e6, 8774416b92, 59933ce25e, 1b9f364e15, 4d983dd1e0, 11c33582ef,
a754477a02, a843eaa939, 03a130b0b8, 9124f4540a, 77892b2064, 4fe6191aa3,
79a982e3ef, 01d81db617, e6fef37d4e, 4bbfd77fc1, 8da6f6555a, 5bcc63bd56,
035cf34673, be8170342e, 0a3b15f41f, 2b4b17dbda, 5058d98ad6, f4ecd43708,
a8f465fdc8, 1c14adc766, e894a86566, bedc378624, e1c2a6b758, 990bb656e8,
168cfc3b26, a36ad378f6, 538d139ebf
19  .build.yml  (new file)
@@ -0,0 +1,19 @@
image: fedora/latest
arch: x86_64
artifacts:
  - nostr-rs-relay/target/release/nostr-rs-relay
environment:
  RUST_LOG: debug
packages:
  - cargo
  - sqlite-devel
sources:
  - https://git.sr.ht/~gheartsfield/nostr-rs-relay/
shell: false
tasks:
  - build: |
      cd nostr-rs-relay
      cargo build --release
  - test: |
      cd nostr-rs-relay
      cargo test --release
2  .cargo/config.toml  (new file)
@@ -0,0 +1,2 @@
[build]
rustflags = ["--cfg", "tokio_unstable"]
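The `tokio_unstable` cfg exists to support the `console-subscriber` crate added in `Cargo.toml` below; tokio only exposes its instrumentation hooks behind that flag. A minimal sketch of how such a subscriber is typically initialized (illustrative wiring only, not part of this diff):

```rust
// Sketch: requires RUSTFLAGS="--cfg tokio_unstable", which the
// .cargo/config.toml above applies to every build in this workspace.
fn main() {
    // Install the tracing layer that feeds the `tokio-console` TUI.
    console_subscriber::init();
    // ... build the tokio runtime and start the relay here ...
}
```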
16  .pre-commit-config.yaml  (new file)
@@ -0,0 +1,16 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.3.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-added-large-files
  - repo: https://github.com/doublify/pre-commit-rust
    rev: v1.0
    hooks:
      # - id: fmt
      - id: cargo-check
      - id: clippy
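With this file committed, the hooks are activated per-clone using the standard `pre-commit` workflow:

```console
$ pre-commit install          # register the git hook in .git/hooks
$ pre-commit run --all-files  # one-off run of every hook across the tree
```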
1324  Cargo.lock  (generated)
File diff suppressed because it is too large.
66  Cargo.toml
@@ -1,32 +1,46 @@
 [package]
 name = "nostr-rs-relay"
-version = "0.6.0"
+version = "0.7.14"
 edition = "2021"
 authors = ["Greg Heartsfield <scsibug@imap.cc>"]
 description = "A relay implementation for the Nostr protocol"
 readme = "README.md"
 homepage = "https://sr.ht/~gheartsfield/nostr-rs-relay/"
 repository = "https://git.sr.ht/~gheartsfield/nostr-rs-relay"
 license = "MIT"
 keywords = ["nostr", "server"]
 categories = ["network-programming", "web-programming"]

 [dependencies]
-log = "^0.4"
-env_logger = "^0.9"
-tokio = { version = "^1.16", features = ["full"] }
-futures = "^0.3"
-futures-util = "^0.3"
-tokio-tungstenite = "^0.17"
-tungstenite = "^0.17"
-thiserror = "^1"
-uuid = { version = "^0.8", features = ["v4"] }
-config = { version = "^0.12", features = ["toml"] }
-bitcoin_hashes = { version = "^0.10", features = ["serde"] }
-secp256k1 = {version = "^0.21", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] }
-serde = { version = "^1.0", features = ["derive"] }
-serde_json = {version = "^1.0", features = ["preserve_order"]}
-hex = "^0.4"
-rusqlite = { version = "^0.26", features = ["limits"]}
-r2d2 = "^0.8"
-r2d2_sqlite = "^0.19"
-lazy_static = "^1.4"
-governor = "^0.4"
-nonzero_ext = "^0.3"
+tracing = "0.1.36"
+tracing-subscriber = "0.2.0"
+tokio = { version = "1", features = ["full", "tracing", "signal"] }
+console-subscriber = "0.1.8"
+futures = "0.3"
+futures-util = "0.3"
+tokio-tungstenite = "0.17"
+tungstenite = "0.17"
+thiserror = "1"
+uuid = { version = "1.1.2", features = ["v4"] }
+config = { version = "0.12", features = ["toml"] }
+bitcoin_hashes = { version = "0.10", features = ["serde"] }
+secp256k1 = {version = "0.21", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] }
+serde = { version = "1.0", features = ["derive"] }
+serde_json = {version = "1.0", features = ["preserve_order"]}
+hex = "0.4"
+rusqlite = { version = "0.26", features = ["limits","bundled","modern_sqlite", "trace"]}
+r2d2 = "0.8"
+r2d2_sqlite = "0.19"
+lazy_static = "1.4"
+governor = "0.4"
+nonzero_ext = "0.3"
 hyper = { version="0.14", features=["client", "server","http1","http2","tcp"] }
-hyper-tls = "^0.5"
-http = { version = "^0.2" }
-parse_duration = "^2"
-rand = "^0.8"
+hyper-tls = "0.5"
+http = { version = "0.2" }
+parse_duration = "2"
+rand = "0.8"
+const_format = "0.2.28"
+regex = "1"
+
+[dev-dependencies]
+anyhow = "1"
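Note that most of the version-string churn above is cosmetic: in Cargo, a bare requirement already uses caret semantics, so `"^0.8"` and `"0.8"` resolve identically.

```toml
# Equivalent requirements; caret is Cargo's default operator:
# r2d2 = "^0.8"
r2d2 = "0.8"
```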
16  Dockerfile
@@ -1,18 +1,24 @@
-FROM rust:1.59.0 as builder
+FROM docker.io/library/rust:1.66.0 as builder

+RUN USER=root cargo install cargo-auditable
 RUN USER=root cargo new --bin nostr-rs-relay
 WORKDIR ./nostr-rs-relay
 COPY ./Cargo.toml ./Cargo.toml
 COPY ./Cargo.lock ./Cargo.lock
-RUN cargo build --release
+# build dependencies only (caching)
+RUN cargo auditable build --release --locked
 # get rid of starter project code
 RUN rm src/*.rs

 # copy project source code
 COPY ./src ./src

 # build auditable release using locked deps
 RUN rm ./target/release/deps/nostr*relay*
-RUN cargo build --release
+RUN cargo auditable build --release --locked

-FROM debian:bullseye-20220125-slim
+FROM docker.io/library/debian:bullseye-slim
 ARG APP=/usr/src/app
 ARG APP_DATA=/usr/src/app/db
 RUN apt-get update \
@@ -36,7 +42,7 @@ RUN chown -R $APP_USER:$APP_USER ${APP}
 USER $APP_USER
 WORKDIR ${APP}

-ENV RUST_LOG=info
+ENV RUST_LOG=info,nostr_rs_relay=info
 ENV APP_DATA=${APP_DATA}

 CMD ./nostr-rs-relay --db ${APP_DATA}
99  README.md
@@ -1,26 +1,35 @@
 # [nostr-rs-relay](https://git.sr.ht/~gheartsfield/nostr-rs-relay)

-This is a [nostr](https://github.com/fiatjaf/nostr) relay, written in
-Rust. It currently supports the entire relay protocol, and has a
-SQLite persistence layer.
+This is a [nostr](https://github.com/nostr-protocol/nostr) relay,
+written in Rust. It currently supports the entire relay protocol, and
+persists data with SQLite.

 The project master repository is available on
 [sourcehut](https://sr.ht/~gheartsfield/nostr-rs-relay/), and is
 mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay).

 [](https://builds.sr.ht/~gheartsfield/nostr-rs-relay/commits/master?)

 ## Features

-NIPs with a relay-specific implementation are listed here.
+[NIPs](https://github.com/nostr-protocol/nips) with a relay-specific implementation are listed here.

-- [x] NIP-01: Core event model
-- [x] NIP-01: Hide old metadata events
-- [x] NIP-01: Id/Author prefix search (_experimental_)
-- [x] NIP-02: Hide old contact list events
-- [ ] NIP-03: OpenTimestamps
-- [x] NIP-05: Mapping Nostr keys to DNS identifiers
-- [x] NIP-09: Event deletion
-- [x] NIP-11: Relay information document
-- [x] NIP-12: Generic tag search (_experimental_)
+- [x] NIP-01: [Basic protocol flow description](https://github.com/nostr-protocol/nips/blob/master/01.md)
+  * Core event model
+  * Hide old metadata events
+  * Id/Author prefix search
+- [x] NIP-02: [Contact List and Petnames](https://github.com/nostr-protocol/nips/blob/master/02.md)
+- [ ] NIP-03: [OpenTimestamps Attestations for Events](https://github.com/nostr-protocol/nips/blob/master/03.md)
+- [x] NIP-05: [Mapping Nostr keys to DNS-based internet identifiers](https://github.com/nostr-protocol/nips/blob/master/05.md)
+- [x] NIP-09: [Event Deletion](https://github.com/nostr-protocol/nips/blob/master/09.md)
+- [x] NIP-11: [Relay Information Document](https://github.com/nostr-protocol/nips/blob/master/11.md)
+- [x] NIP-12: [Generic Tag Queries](https://github.com/nostr-protocol/nips/blob/master/12.md)
+- [x] NIP-15: [End of Stored Events Notice](https://github.com/nostr-protocol/nips/blob/master/15.md)
+- [x] NIP-16: [Event Treatment](https://github.com/nostr-protocol/nips/blob/master/16.md)
+- [x] NIP-20: [Command Results](https://github.com/nostr-protocol/nips/blob/master/20.md)
+- [x] NIP-22: [Event `created_at` limits](https://github.com/nostr-protocol/nips/blob/master/22.md) (_future-dated events only_)
+- [x] NIP-26: [Event Delegation](https://github.com/nostr-protocol/nips/blob/master/26.md)
+- [x] NIP-28: [Public Chat](https://github.com/nostr-protocol/nips/blob/master/28.md)

 ## Quick Start

@@ -29,15 +38,32 @@ application. Use a bind mount to store the SQLite database outside of
 the container image, and map the container's 8080 port to a host port
 (7000 in the example below).

+The examples below start a rootless podman container, mapping a local
+data directory and config file.
+
 ```console
-$ docker build -t nostr-rs-relay .
+$ podman build -t nostr-rs-relay .

-$ docker run -it -p 7000:8080 \
-  --mount src=$(pwd)/data,target=/usr/src/app/db,type=bind nostr-rs-relay
+$ mkdir data

-[2021-12-31T19:58:31Z INFO nostr_rs_relay] listening on: 0.0.0.0:8080
-[2021-12-31T19:58:31Z INFO nostr_rs_relay::db] opened database "/usr/src/app/db/nostr.db" for writing
-[2021-12-31T19:58:31Z INFO nostr_rs_relay::db] DB version = 2
+$ podman unshare chown 100:100 data
+
+$ podman run -it --rm -p 7000:8080 \
+  --user=100:100 \
+  -v $(pwd)/data:/usr/src/app/db:Z \
+  -v $(pwd)/config.toml:/usr/src/app/config.toml:ro,Z \
+  --name nostr-relay nostr-rs-relay:latest
+
+Nov 19 15:31:15.013 INFO nostr_rs_relay: Starting up from main
+Nov 19 15:31:15.017 INFO nostr_rs_relay::server: listening on: 0.0.0.0:8080
+Nov 19 15:31:15.019 INFO nostr_rs_relay::server: db writer created
+Nov 19 15:31:15.019 INFO nostr_rs_relay::server: control message listener started
+Nov 19 15:31:15.019 INFO nostr_rs_relay::db: Built a connection pool "event writer" (min=1, max=4)
+Nov 19 15:31:15.019 INFO nostr_rs_relay::db: opened database "/usr/src/app/db/nostr.db" for writing
+Nov 19 15:31:15.019 INFO nostr_rs_relay::schema: DB version = 0
+Nov 19 15:31:15.054 INFO nostr_rs_relay::schema: database pragma/schema initialized to v7, and ready
+Nov 19 15:31:15.054 INFO nostr_rs_relay::schema: All migration scripts completed successfully. Welcome to v7.
+Nov 19 15:31:15.521 INFO nostr_rs_relay::db: Built a connection pool "client query" (min=4, max=128)
 ```

 Use a `nostr` client such as
@@ -56,6 +82,38 @@ Text Note [81cf...2652] from 296a...9b92 5 seconds ago
 A pre-built container is also available on DockerHub:
 https://hub.docker.com/r/scsibug/nostr-rs-relay

+## Build and Run (without Docker)
+
+Building `nostr-rs-relay` requires an installation of Cargo & Rust: https://www.rust-lang.org/tools/install
+
+Clone this repository, and then build a release version of the relay:
+
+```console
+$ git clone -q https://git.sr.ht/\~gheartsfield/nostr-rs-relay
+$ cd nostr-rs-relay
+$ cargo build -q -r
+```
+
+The relay executable is now located in
+`target/release/nostr-rs-relay`. In order to run it with logging
+enabled, execute it with the `RUST_LOG` variable set:
+
+```console
+$ RUST_LOG=warn,nostr_rs_relay=info ./target/release/nostr-rs-relay
+Dec 26 10:31:56.455 INFO nostr_rs_relay: Starting up from main
+Dec 26 10:31:56.464 INFO nostr_rs_relay::server: listening on: 0.0.0.0:8080
+Dec 26 10:31:56.466 INFO nostr_rs_relay::server: db writer created
+Dec 26 10:31:56.466 INFO nostr_rs_relay::db: Built a connection pool "event writer" (min=1, max=2)
+Dec 26 10:31:56.466 INFO nostr_rs_relay::db: opened database "./nostr.db" for writing
+Dec 26 10:31:56.466 INFO nostr_rs_relay::schema: DB version = 11
+Dec 26 10:31:56.467 INFO nostr_rs_relay::db: Built a connection pool "maintenance writer" (min=1, max=2)
+Dec 26 10:31:56.467 INFO nostr_rs_relay::server: control message listener started
+Dec 26 10:31:56.468 INFO nostr_rs_relay::db: Built a connection pool "client query" (min=4, max=8)
+```
+
+You now have a running relay, on port `8080`. Use a `nostr` client or
+`websocat` to connect and send/query for events.
+
 ## Configuration

 The sample [`config.toml`](config.toml) file demonstrates the
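Expanding on the `websocat` mention above (an illustrative smoke test, not part of the diff; it assumes `websocat` is installed and the relay is listening on its default port), a NIP-01 `REQ` message can be piped straight to the socket:

```console
$ echo '["REQ","test-sub",{"kinds":[1],"limit":3}]' | websocat ws://127.0.0.1:8080
```

Matching `EVENT` messages are returned, followed by an `EOSE` notice (NIP-15) for the subscription.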
@@ -84,6 +142,9 @@ For development discussions, please feel free to use the [sourcehut
 mailing list](https://lists.sr.ht/~gheartsfield/nostr-rs-relay-devel).
 Or, drop by the [Nostr Telegram Channel](https://t.me/nostr_protocol).

+To chat about `nostr-rs-relay` on `nostr` itself, visit our channel on [anigma](https://anigma.io/) or another client that supports [NIP-28](https://github.com/nostr-protocol/nips/blob/master/28.md) chats:
+* `2ad246a094fee48c6e455dd13d759d5f41b5a233120f5719d81ebc1935075194`

 License
 ---
 This project is MIT licensed.
46  config.toml
@@ -16,19 +16,29 @@ description = "A newly created nostr-rs-relay.\n\nCustomize this with your own i
 # Administrative contact URI
 #contact = "mailto:contact@example.com"

+[diagnostics]
+# Enable tokio tracing (for use with tokio-console)
+#tracing = true
+
 [database]
 # Directory for SQLite files. Defaults to the current directory. Can
-# also be specified (and overridden) with the "--db dirname" command
+# also be specified (and overridden) with the "--db dirname" command
 # line option.
 data_directory = "."

+# Use an in-memory database instead of 'nostr.db'.
+# Caution; this will not survive a process restart!
+#in_memory = false
+
 # Database connection pool settings for subscribers:

 # Minimum number of SQLite reader connections
 #min_conn = 4

-# Maximum number of SQLite reader connections
-#max_conn = 128
+# Maximum number of SQLite reader connections. Recommend setting this
+# to approx the number of cores.
+#max_conn = 8

 [network]
 # Bind to this network address
@@ -37,16 +47,40 @@ address = "0.0.0.0"
 # Listen on this port
 port = 8080

+# If present, read this HTTP header for logging client IP addresses.
+# Examples for common proxies, cloudflare:
+#remote_ip_header = "x-forwarded-for"
+#remote_ip_header = "cf-connecting-ip"
+
+# Websocket ping interval in seconds, defaults to 5 minutes
+#ping_interval = 300
+
 [options]
 # Reject events that have timestamps greater than this many seconds in
-# the future. Defaults to rejecting anything greater than 30 minutes
-# from the current time.
+# the future. Recommended to reject anything greater than 30 minutes
+# from the current time, but the default is to allow any date.
 reject_future_seconds = 1800

 [limits]
 # Limit events created per second, averaged over one minute. Must be
-# an integer. If not set (or set to 0), defaults to unlimited.
-#messages_per_sec = 0
+# an integer. If not set (or set to 0), defaults to unlimited. Note:
+# this is for the server as a whole, not per-connection.
+# messages_per_sec = 0

 # Limit client subscriptions created per second, averaged over one
 # minute. Must be an integer. If not set (or set to 0), defaults to
 # unlimited.
 #subscriptions_per_min = 0

+# UNIMPLEMENTED...
 # Limit how many concurrent database connections a client can have.
 # This prevents a single client from starting too many expensive
 # database queries. Must be an integer. If not set (or set to 0),
 # defaults to unlimited (subject to subscription limits).
 #db_conns_per_client = 0

+# Limit blocking threads used for database connections. Defaults to 16.
+#max_blocking_threads = 16
+
 # Limit the maximum size of an EVENT message. Defaults to 128 KB.
 # Set to 0 for unlimited.
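Pulling the new options together, a minimal override file might look like the following (the values and the data directory path are illustrative, not recommendations from this diff):

```toml
[diagnostics]
tracing = false

[database]
data_directory = "/var/lib/nostr-rs-relay"  # hypothetical path
max_conn = 8

[network]
remote_ip_header = "x-forwarded-for"
ping_interval = 300

[limits]
max_blocking_threads = 16
```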
@@ -1,8 +1,8 @@
 # Reverse Proxy Setup Guide

 It is recommended to run `nostr-rs-relay` behind a reverse proxy such
-as `haproxy` or `nginx` to provide TLS termination. A simple example
-of an `haproxy` configuration is documented here.
+as `haproxy` or `nginx` to provide TLS termination. Simple examples
+of `haproxy` and `nginx` configurations are documented here.

 ## Minimal HAProxy Configuration

@@ -46,8 +46,47 @@ backend relay
     server relay 127.0.0.1:8080
 ```

-### Notes
+### HAProxy Notes

 You may experience WebSocket connection problems with Firefox if
 HTTP/2 is enabled, for older versions of HAProxy (2.3.x). Either
 disable HTTP/2 (`h2`), or upgrade HAProxy.
+
+## Bare-bones Nginx Configuration
+
+Assumptions:
+
+* `Nginx` version is `1.18.0` (other versions not tested).
+* Hostname for the relay is `relay.example.com`.
+* SSL certificate and key are located at `/etc/letsencrypt/live/relay.example.com/`.
+* Relay is running on port `8080`.
+
+```
+http {
+    server {
+        listen 443 ssl;
+        server_name relay.example.com;
+        ssl_certificate /etc/letsencrypt/live/relay.example.com/fullchain.pem;
+        ssl_certificate_key /etc/letsencrypt/live/relay.example.com/privkey.pem;
+        ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
+        ssl_ciphers HIGH:!aNULL:!MD5;
+        keepalive_timeout 70;
+
+        location / {
+            proxy_pass http://localhost:8080;
+            proxy_http_version 1.1;
+            proxy_set_header Upgrade $http_upgrade;
+            proxy_set_header Connection "Upgrade";
+            proxy_set_header Host $host;
+        }
+    }
+}
+```
+
+### Nginx Notes
+
+The above configuration was tested with `nginx` `1.18.0` on `Ubuntu 20.04`.
+
+For help installing `nginx` on `Ubuntu`, see [this guide](https://www.digitalocean.com/community/tutorials/how-to-install-nginx-on-ubuntu-20-04).
+
+For guidance on using `letsencrypt` to obtain a cert on `Ubuntu`, including an `nginx` plugin, see [this post](https://www.digitalocean.com/community/tutorials/how-to-secure-nginx-with-let-s-encrypt-on-ubuntu-20-04).
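One common way to obtain the certificate referenced above (a sketch; it assumes the `certbot` nginx plugin described in the linked guide) is:

```console
$ sudo certbot --nginx -d relay.example.com
```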
@@ -1 +1,4 @@
-edition = "2018"
+edition = "2021"
+#max_width = 140
+#chain_width = 100
+#fn_call_width = 100

10  src/close.rs
@@ -5,7 +5,7 @@ use crate::error::{Error, Result};
 use serde::{Deserialize, Serialize};

 /// Close command in network format
-#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
 pub struct CloseCmd {
     /// Protocol command, expected to always be "CLOSE".
     cmd: String,
@@ -14,7 +14,7 @@ pub struct CloseCmd {
 }

 /// Identifier of the subscription to be closed.
-#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
 pub struct Close {
     /// The subscription identifier being closed.
     pub id: String,
@@ -23,10 +23,10 @@ pub struct Close {
 impl From<CloseCmd> for Result<Close> {
     fn from(cc: CloseCmd) -> Result<Close> {
         // ensure command is correct
-        if cc.cmd != "CLOSE" {
-            Err(Error::CommandUnknownError)
-        } else {
+        if cc.cmd == "CLOSE" {
             Ok(Close { id: cc.id })
+        } else {
+            Err(Error::CommandUnknownError)
         }
     }
 }
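For reference, the wire message these types model is the NIP-01 `CLOSE` command, sent by clients as a two-element JSON array:

```json
["CLOSE", "my-subscription-id"]
```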
@@ -1,15 +1,8 @@
 //! Configuration file and settings management
 use config::{Config, ConfigError, File};
-use lazy_static::lazy_static;
-use log::*;
 use serde::{Deserialize, Serialize};
-use std::sync::RwLock;
 use std::time::Duration;
-
-// initialize a singleton default configuration
-lazy_static! {
-    pub static ref SETTINGS: RwLock<Settings> = RwLock::new(Settings::default());
-}
+use tracing::warn;

 #[derive(Debug, Serialize, Deserialize, Clone)]
 #[allow(unused)]
@@ -21,29 +14,31 @@ pub struct Info {
     pub contact: Option<String>,
 }

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Database {
     pub data_directory: String,
+    pub in_memory: bool,
     pub min_conn: u32,
     pub max_conn: u32,
 }

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Network {
     pub port: u16,
     pub address: String,
+    pub remote_ip_header: Option<String>, // retrieve client IP from this HTTP header if present
+    pub ping_interval_seconds: u32,
 }

-//
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Options {
     pub reject_future_seconds: Option<usize>, // if defined, reject any events with a timestamp more than X seconds in the future
 }

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Retention {
     // TODO: implement
@@ -53,10 +48,13 @@ pub struct Retention {
     pub whitelist_addresses: Option<Vec<String>>, // whitelisted addresses (never delete)
 }

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Limits {
     pub messages_per_sec: Option<u32>, // Artificially slow down event writing to limit disk consumption (averaged over 1 minute)
     pub subscriptions_per_min: Option<u32>, // Artificially slow down request (db query) creation to prevent abuse (averaged over 1 minute)
     pub db_conns_per_client: Option<u32>, // How many concurrent database queries (not subscriptions) may a client have?
+    pub max_blocking_threads: usize,
     pub max_event_bytes: Option<usize>, // Maximum size of an EVENT message
     pub max_ws_message_bytes: Option<usize>,
     pub max_ws_frame_bytes: Option<usize>,
@@ -64,13 +62,19 @@ pub struct Limits {
     pub event_persist_buffer: usize, // events to buffer for database commits (block senders if database writes are too slow)
 }

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Authorization {
     pub pubkey_whitelist: Option<Vec<String>>, // If present, only allow these pubkeys to publish events
 }

-#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct Diagnostics {
+    pub tracing: bool, // enables tokio console-subscriber
+}
+
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Copy)]
 #[serde(rename_all = "lowercase")]
 pub enum VerifiedUsersMode {
     Enabled,
@@ -78,7 +82,7 @@ pub enum VerifiedUsersMode {
     Disabled,
 }

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct VerifiedUsers {
     pub mode: VerifiedUsersMode, // Mode of operation: "enabled" (enforce) or "passive" (check only). If none, this is simply disabled.
@@ -97,37 +101,46 @@ impl VerifiedUsers {
         self.verify_update_frequency_duration = self.verify_update_duration();
     }

+    #[must_use]
     pub fn is_enabled(&self) -> bool {
         self.mode == VerifiedUsersMode::Enabled
     }

+    #[must_use]
     pub fn is_active(&self) -> bool {
         self.mode == VerifiedUsersMode::Enabled || self.mode == VerifiedUsersMode::Passive
     }

+    #[must_use]
     pub fn is_passive(&self) -> bool {
         self.mode == VerifiedUsersMode::Passive
     }

+    #[must_use]
     pub fn verify_expiration_duration(&self) -> Option<Duration> {
         self.verify_expiration
             .as_ref()
             .and_then(|x| parse_duration::parse(x).ok())
     }

+    #[must_use]
     pub fn verify_update_duration(&self) -> Option<Duration> {
         self.verify_update_frequency
             .as_ref()
             .and_then(|x| parse_duration::parse(x).ok())
     }

+    #[must_use]
     pub fn is_valid(&self) -> bool {
         self.verify_expiration_duration().is_some() && self.verify_update_duration().is_some()
     }
 }

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Settings {
     pub info: Info,
+    pub diagnostics: Diagnostics,
     pub database: Database,
     pub network: Network,
     pub limits: Limits,
@@ -138,6 +151,7 @@ pub struct Settings {
 }

 impl Settings {
+    #[must_use]
     pub fn new() -> Self {
         let default_settings = Self::default();
         // attempt to construct settings with file
@@ -157,22 +171,21 @@ impl Settings {
             // use defaults
             .add_source(Config::try_from(default)?)
             // override with file contents
-            .add_source(File::with_name("config"))
-            .build()?
-            .try_into()
-            .unwrap();
+            .add_source(File::with_name("config.toml"))
+            .build()?;
+        let mut settings: Settings = config.try_deserialize()?;
         // ensure connection pool size is logical
-        if settings.database.min_conn > settings.database.max_conn {
-            panic!(
-                "Database min_conn setting ({}) cannot exceed max_conn ({})",
-                settings.database.min_conn, settings.database.max_conn
-            );
-        }
+        assert!(
+            settings.database.min_conn <= settings.database.max_conn,
+            "Database min_conn setting ({}) cannot exceed max_conn ({})",
+            settings.database.min_conn,
+            settings.database.max_conn
+        );
         // ensure durations parse
-        if !settings.verified_users.is_valid() {
-            panic!("VerifiedUsers time settings could not be parsed");
-        }
+        assert!(
+            settings.verified_users.is_valid(),
+            "VerifiedUsers time settings could not be parsed"
+        );
         // initialize durations for verified users
         settings.verified_users.init();
         Ok(settings)
@@ -189,17 +202,24 @@ impl Default for Settings {
                 pubkey: None,
                 contact: None,
             },
+            diagnostics: Diagnostics { tracing: false },
             database: Database {
                 data_directory: ".".to_owned(),
+                in_memory: false,
                 min_conn: 4,
-                max_conn: 128,
+                max_conn: 8,
             },
             network: Network {
                 port: 8080,
+                ping_interval_seconds: 300,
                 address: "0.0.0.0".to_owned(),
+                remote_ip_header: None,
             },
             limits: Limits {
                 messages_per_sec: None,
                 subscriptions_per_min: None,
                 db_conns_per_client: None,
+                max_blocking_threads: 16,
                 max_event_bytes: Some(2 << 17), // 128K
                 max_ws_message_bytes: Some(2 << 17), // 128K
                 max_ws_frame_bytes: Some(2 << 17), // 128K
@@ -226,7 +246,7 @@ impl Default for Settings {
                 whitelist_addresses: None, // whitelisted addresses (never delete)
             },
             options: Options {
-                reject_future_seconds: Some(30 * 60), // Reject events 30min in the future or greater
+                reject_future_seconds: None, // Reject events in the future if defined
             },
         }
     }
 }
63  src/conn.rs
@@ -2,11 +2,10 @@
 use crate::close::Close;
 use crate::error::Error;
 use crate::error::Result;
 use crate::event::Event;
+
 use crate::subscription::Subscription;
-use log::*;
 use std::collections::HashMap;
+use tracing::{debug, trace};
 use uuid::Uuid;

 /// A subscription identifier has a maximum length
@@ -14,6 +13,8 @@ const MAX_SUBSCRIPTION_ID_LEN: usize = 256;

 /// State for a client connection
 pub struct ClientConn {
+    /// Client IP (either from socket, or configured proxy header)
+    client_ip: String,
     /// Unique client identifier generated at connection time
     client_id: Uuid,
     /// The current set of active client subscriptions
@@ -24,46 +25,56 @@ pub struct ClientConn {

 impl Default for ClientConn {
     fn default() -> Self {
-        Self::new()
+        Self::new("unknown".to_owned())
     }
 }

 impl ClientConn {
     /// Create a new, empty connection state.
-    pub fn new() -> Self {
+    #[must_use]
+    pub fn new(client_ip: String) -> Self {
         let client_id = Uuid::new_v4();
         ClientConn {
+            client_ip,
             client_id,
             subscriptions: HashMap::new(),
             max_subs: 32,
         }
     }

     pub fn subscriptions(&self) -> &HashMap<String, Subscription> {
         &self.subscriptions
     }

     /// Check if the given subscription already exists
     pub fn has_subscription(&self, sub: &Subscription) -> bool {
         self.subscriptions.values().any(|x| x == sub)
     }

     /// Get a short prefix of the client's unique identifier, suitable
     /// for logging.
+    #[must_use]
     pub fn get_client_prefix(&self) -> String {
         self.client_id.to_string().chars().take(8).collect()
     }

-    /// Find all matching subscriptions.
-    pub fn get_matching_subscriptions(&self, e: &Event) -> Vec<&str> {
-        let mut v: Vec<&str> = vec![];
-        for (id, sub) in self.subscriptions.iter() {
-            if sub.interested_in_event(e) {
-                v.push(id);
-            }
-        }
-        v
+    #[must_use]
+    pub fn ip(&self) -> &str {
+        &self.client_ip
     }

     /// Add a new subscription for this connection.
+    /// # Errors
+    ///
+    /// Will return `Err` if the client has too many subscriptions, or
+    /// if the provided name is excessively long.
     pub fn subscribe(&mut self, s: Subscription) -> Result<()> {
         let k = s.get_id();
         let sub_id_len = k.len();
         // prevent arbitrarily long subscription identifiers from
         // being used.
         if sub_id_len > MAX_SUBSCRIPTION_ID_LEN {
-            info!(
+            debug!(
                 "ignoring sub request with excessive length: ({})",
                 sub_id_len
             );
@@ -72,8 +83,12 @@ impl ClientConn {
         // check if an existing subscription exists, and replace if so
         if self.subscriptions.contains_key(&k) {
             self.subscriptions.remove(&k);
-            self.subscriptions.insert(k, s);
-            debug!("replaced existing subscription");
+            self.subscriptions.insert(k, s.clone());
+            trace!(
+                "replaced existing subscription (cid: {}, sub: {:?})",
+                self.get_client_prefix(),
+                s.get_id()
+            );
             return Ok(());
         }

@@ -83,20 +98,22 @@ impl ClientConn {
         }
         // add subscription
         self.subscriptions.insert(k, s);
-        debug!(
-            "registered new subscription, currently have {} active subs",
-            self.subscriptions.len()
+        trace!(
+            "registered new subscription, currently have {} active subs (cid: {})",
+            self.subscriptions.len(),
+            self.get_client_prefix(),
         );
         Ok(())
     }

     /// Remove the subscription for this connection.
-    pub fn unsubscribe(&mut self, c: Close) {
+    pub fn unsubscribe(&mut self, c: &Close) {
         // TODO: return notice if subscription did not exist.
         self.subscriptions.remove(&c.id);
-        debug!(
-            "removed subscription, currently have {} active subs",
-            self.subscriptions.len()
+        trace!(
+            "removed subscription, currently have {} active subs (cid: {})",
+            self.subscriptions.len(),
+            self.get_client_prefix(),
         );
     }
 }
403  src/delegation.rs  (new file)
@@ -0,0 +1,403 @@
//! Event parsing and validation
use crate::error::Error;
use crate::error::Result;
use crate::event::Event;
use bitcoin_hashes::{sha256, Hash};
use lazy_static::lazy_static;
use regex::Regex;
use secp256k1::{schnorr, Secp256k1, VerifyOnly, XOnlyPublicKey};
use serde::{Deserialize, Serialize};
use std::str::FromStr;
use tracing::{debug, info};

// This handles everything related to delegation, in particular the
// condition/rune parsing and logic.

// Conditions are poorly specified, so we will implement the minimum
// necessary for now.

// fields MUST be either "kind" or "created_at".
// operators supported are ">", "<", "=", "!".
// no operations on 'content' are supported.

// this allows constraints for:
// valid date ranges (valid from X->Y dates).
// specific kinds (publish kind=1,5)
// kind ranges (publish ephemeral events, kind>19999&kind<30001)

// for more complex scenarios (allow delegatee to publish ephemeral
// AND replacement events), it may be necessary to generate and use
// different condition strings, since we do not support grouping or
// "OR" logic.

lazy_static! {
    /// Secp256k1 verification instance.
    pub static ref SECP: Secp256k1<VerifyOnly> = Secp256k1::verification_only();
}

#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub enum Field {
    Kind,
    CreatedAt,
}

impl FromStr for Field {
    type Err = Error;
    fn from_str(value: &str) -> Result<Self, Self::Err> {
        if value == "kind" {
            Ok(Field::Kind)
        } else if value == "created_at" {
            Ok(Field::CreatedAt)
        } else {
            Err(Error::DelegationParseError)
        }
    }
}

#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub enum Operator {
    LessThan,
    GreaterThan,
    Equals,
    NotEquals,
}
impl FromStr for Operator {
    type Err = Error;
    fn from_str(value: &str) -> Result<Self, Self::Err> {
        if value == "<" {
            Ok(Operator::LessThan)
        } else if value == ">" {
            Ok(Operator::GreaterThan)
        } else if value == "=" {
            Ok(Operator::Equals)
        } else if value == "!" {
            Ok(Operator::NotEquals)
        } else {
            Err(Error::DelegationParseError)
        }
    }
}

#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct ConditionQuery {
    pub conditions: Vec<Condition>,
}

impl ConditionQuery {
    pub fn allows_event(&self, event: &Event) -> bool {
        // check each condition, to ensure that the event complies
        // with the restriction.
        for c in &self.conditions {
            if !c.allows_event(event) {
                // any failing conditions invalidates the delegation
                // on this event
                return false;
            }
        }
        // delegation was permitted unconditionally, or all conditions
        // were true
        true
    }
}

// Verify that the delegator approved the delegation; return a ConditionQuery if so.
pub fn validate_delegation(
    delegator: &str,
    delegatee: &str,
    cond_query: &str,
    sigstr: &str,
) -> Option<ConditionQuery> {
    // form the token
    let tok = format!("nostr:delegation:{}:{}", delegatee, cond_query);
    // form SHA256 hash
    let digest: sha256::Hash = sha256::Hash::hash(tok.as_bytes());
    let sig = schnorr::Signature::from_str(sigstr).unwrap();
    if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) {
        if let Ok(pubkey) = XOnlyPublicKey::from_str(delegator) {
            let verify = SECP.verify_schnorr(&sig, &msg, &pubkey);
            if verify.is_ok() {
                // return the parsed condition query
                cond_query.parse::<ConditionQuery>().ok()
            } else {
                debug!("client sent a delegation signature that did not validate");
                None
            }
        } else {
            debug!("client sent malformed delegation pubkey");
            None
        }
    } else {
        info!("error converting delegation digest to secp256k1 message");
        None
    }
}

/// Parsed delegation condition
/// see https://github.com/nostr-protocol/nips/pull/28#pullrequestreview-1084903800
/// An example complex condition would be: kind=1,2,3&created_at<1665265999
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct Condition {
    pub field: Field,
    pub operator: Operator,
    pub values: Vec<u64>,
}

impl Condition {
    /// Check if this condition allows the given event to be delegated
    pub fn allows_event(&self, event: &Event) -> bool {
        // determine what the right-hand side of the operator is
        let resolved_field = match &self.field {
            Field::Kind => event.kind,
            Field::CreatedAt => event.created_at,
        };
        match &self.operator {
            Operator::LessThan => {
                // the less-than operator is only valid for single values.
                if self.values.len() == 1 {
                    if let Some(v) = self.values.first() {
                        return resolved_field < *v;
                    }
                }
            }
            Operator::GreaterThan => {
                // the greater-than operator is only valid for single values.
                if self.values.len() == 1 {
                    if let Some(v) = self.values.first() {
                        return resolved_field > *v;
                    }
                }
            }
            Operator::Equals => {
                // equals is interpreted as "must be equal to at least one provided value"
                return self.values.iter().any(|&x| resolved_field == x);
            }
            Operator::NotEquals => {
                // not-equals is interpreted as "must not be equal to any provided value"
                // this is the one case where an empty list of values could be allowed; even though it is a pointless restriction.
                return self.values.iter().all(|&x| resolved_field != x);
            }
        }
        false
    }
}

fn str_to_condition(cs: &str) -> Option<Condition> {
    // a condition is a string (alphanum+underscore), an operator (<>=!), and values (num+comma)
    lazy_static! {
        static ref RE: Regex = Regex::new("([[:word:]]+)([<>=!]+)([,[[:digit:]]]*)").unwrap();
    }
    // match against the regex
    let caps = RE.captures(cs)?;
    let field = caps.get(1)?.as_str().parse::<Field>().ok()?;
    let operator = caps.get(2)?.as_str().parse::<Operator>().ok()?;
    // values are just comma separated numbers, but all must be parsed
    let rawvals = caps.get(3)?.as_str();
    let values = rawvals
        .split_terminator(',')
        .map(|n| n.parse::<u64>().ok())
        .collect::<Option<Vec<_>>>()?;
    // convert field string into Field
    Some(Condition {
        field,
        operator,
        values,
    })
}

/// Parse a condition query from a string slice
impl FromStr for ConditionQuery {
    type Err = Error;
    fn from_str(value: &str) -> Result<Self, Self::Err> {
        // split the string with '&'
        let mut conditions = vec![];
        let condstrs = value.split_terminator('&');
        // parse each individual condition
        for c in condstrs {
            conditions.push(str_to_condition(c).ok_or(Error::DelegationParseError)?);
        }
        Ok(ConditionQuery { conditions })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // parse condition strings
    #[test]
    fn parse_empty() -> Result<()> {
        // given an empty condition query, produce an empty vector
        let empty_cq = ConditionQuery { conditions: vec![] };
        let parsed = "".parse::<ConditionQuery>()?;
        assert_eq!(parsed, empty_cq);
        Ok(())
    }

    // parse field 'kind'
    #[test]
    fn test_kind_field_parse() -> Result<()> {
        let field = "kind".parse::<Field>()?;
        assert_eq!(field, Field::Kind);
        Ok(())
    }
    // parse field 'created_at'
    #[test]
    fn test_created_at_field_parse() -> Result<()> {
        let field = "created_at".parse::<Field>()?;
        assert_eq!(field, Field::CreatedAt);
        Ok(())
    }
    // parse unknown field
    #[test]
    fn unknown_field_parse() {
        let field = "unk".parse::<Field>();
        assert!(field.is_err());
    }

    // parse a full conditional query with an empty array
    #[test]
    fn parse_kind_equals_empty() -> Result<()> {
        let kind_cq = ConditionQuery {
            conditions: vec![Condition {
                field: Field::Kind,
                operator: Operator::Equals,
                values: vec![],
            }],
        };
        let parsed = "kind=".parse::<ConditionQuery>()?;
        assert_eq!(parsed, kind_cq);
        Ok(())
    }
    // parse a full conditional query with a single value
    #[test]
    fn parse_kind_equals_singleval() -> Result<()> {
        let kind_cq = ConditionQuery {
            conditions: vec![Condition {
                field: Field::Kind,
                operator: Operator::Equals,
                values: vec![1],
            }],
        };
        let parsed = "kind=1".parse::<ConditionQuery>()?;
        assert_eq!(parsed, kind_cq);
        Ok(())
    }
    // parse a full conditional query with multiple values
    #[test]
    fn parse_kind_equals_multival() -> Result<()> {
        let kind_cq = ConditionQuery {
            conditions: vec![Condition {
                field: Field::Kind,
                operator: Operator::Equals,
                values: vec![1, 2, 4],
            }],
        };
        let parsed = "kind=1,2,4".parse::<ConditionQuery>()?;
        assert_eq!(parsed, kind_cq);
        Ok(())
    }
    // parse multiple conditions
    #[test]
    fn parse_multi_conditions() -> Result<()> {
        let cq = ConditionQuery {
            conditions: vec![
                Condition {
                    field: Field::Kind,
                    operator: Operator::GreaterThan,
                    values: vec![10000],
                },
                Condition {
                    field: Field::Kind,
                    operator: Operator::LessThan,
                    values: vec![20000],
                },
                Condition {
                    field: Field::Kind,
                    operator: Operator::NotEquals,
                    values: vec![10001],
                },
                Condition {
                    field: Field::CreatedAt,
                    operator: Operator::LessThan,
                    values: vec![1665867123],
                },
            ],
        };
        let parsed =
            "kind>10000&kind<20000&kind!10001&created_at<1665867123".parse::<ConditionQuery>()?;
        assert_eq!(parsed, cq);
        Ok(())
    }
    // Check for condition logic on event w/ empty values
    #[test]
    fn condition_with_empty_values() {
        let mut c = Condition {
            field: Field::Kind,
            operator: Operator::GreaterThan,
            values: vec![],
        };
        let e = Event::simple_event();
        assert!(!c.allows_event(&e));
        c.operator = Operator::LessThan;
        assert!(!c.allows_event(&e));
        c.operator = Operator::Equals;
        assert!(!c.allows_event(&e));
        // Not Equals applied to an empty list *is* allowed
        // (pointless, but logically valid).
        c.operator = Operator::NotEquals;
        assert!(c.allows_event(&e));
    }

    // Check for condition logic on event w/ single value
    #[test]
    fn condition_kind_gt_event_single() {
        let c = Condition {
            field: Field::Kind,
            operator: Operator::GreaterThan,
            values: vec![10],
        };
        let mut e = Event::simple_event();
        // kind is not greater than 10, not allowed
        e.kind = 1;
        assert!(!c.allows_event(&e));
        // kind is greater than 10, allowed
        e.kind = 100;
        assert!(c.allows_event(&e));
        // kind is 10, not allowed
        e.kind = 10;
        assert!(!c.allows_event(&e));
    }
    // Check for condition logic on event w/ multi values
    #[test]
    fn condition_with_multi_values() {
        let mut c = Condition {
            field: Field::Kind,
            operator: Operator::Equals,
            values: vec![0, 10, 20],
        };
        let mut e = Event::simple_event();
        // Allow if event kind is in list for Equals
        e.kind = 10;
        assert!(c.allows_event(&e));
        // Deny if event kind is not in list for Equals
        e.kind = 11;
        assert!(!c.allows_event(&e));
        // Deny if event kind is in list for NotEquals
        e.kind = 10;
        c.operator = Operator::NotEquals;
        assert!(!c.allows_event(&e));
        // Allow if event kind is not in list for NotEquals
        e.kind = 99;
        c.operator = Operator::NotEquals;
        assert!(c.allows_event(&e));
        // Always deny if GreaterThan/LessThan for a list
        c.operator = Operator::LessThan;
        assert!(!c.allows_event(&e));
        c.operator = Operator::GreaterThan;
        assert!(!c.allows_event(&e));
    }
}
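Given the `FromStr` implementations above, a condition string such as the ephemeral-kind example from the module comments parses directly (illustrative usage mirroring the unit tests, not part of the diff):

```rust
use crate::delegation::ConditionQuery;

fn example() {
    // Permit only ephemeral-range kinds, per the module comments above:
    let cq: ConditionQuery = "kind>19999&kind<30001".parse().unwrap();
    assert_eq!(cq.conditions.len(), 2);
}
```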
14  src/error.rs
@@ -17,10 +17,16 @@ pub enum Error {
     ConnWriteError,
     #[error("EVENT parse failed")]
     EventParseFailed,
-    #[error("ClOSE message parse failed")]
+    #[error("CLOSE message parse failed")]
     CloseParseFailed,
-    #[error("Event validation failed")]
-    EventInvalid,
+    #[error("Event invalid signature")]
+    EventInvalidSignature,
+    #[error("Event invalid id")]
+    EventInvalidId,
+    #[error("Event malformed pubkey")]
+    EventMalformedPubkey,
+    #[error("Event could not canonicalize")]
+    EventCouldNotCanonicalize,
     #[error("Event too large")]
     EventMaxLengthError(usize),
     #[error("Subscription identifier max length exceeded")]
@@ -50,6 +56,8 @@ pub enum Error {
     HyperError(hyper::Error),
     #[error("Hex encoding error")]
     HexError(hex::FromHexError),
+    #[error("Delegation parse error")]
+    DelegationParseError,
     #[error("Unknown/Undocumented")]
     UnknownError,
 }
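Because the enum derives `thiserror::Error`, each `#[error("...")]` attribute above becomes the variant's `Display` output, e.g. (illustrative, not part of the diff):

```rust
assert_eq!(
    Error::DelegationParseError.to_string(),
    "Delegation parse error"
);
```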
237
src/event.rs
237
src/event.rs
@@ -1,12 +1,11 @@
|
||||
//! Event parsing and validation
|
||||
use crate::config;
|
||||
use crate::delegation::validate_delegation;
|
||||
use crate::error::Error::*;
|
||||
use crate::error::Result;
|
||||
use crate::nip05;
|
||||
use crate::utils::unix_time;
|
||||
use bitcoin_hashes::{sha256, Hash};
|
||||
use lazy_static::lazy_static;
|
||||
use log::*;
|
||||
use secp256k1::{schnorr, Secp256k1, VerifyOnly, XOnlyPublicKey};
|
||||
use serde::{Deserialize, Deserializer, Serialize};
|
||||
use serde_json::value::Value;
|
||||
@@ -14,6 +13,7 @@ use serde_json::Number;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::HashSet;
|
||||
use std::str::FromStr;
|
||||
use tracing::{debug, info};
|
||||
|
||||
lazy_static! {
|
||||
/// Secp256k1 verification instance.
|
||||
@@ -21,27 +21,35 @@ lazy_static! {
|
||||
}
|
||||
|
||||
/// Event command in network format.
|
||||
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
|
||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
|
||||
pub struct EventCmd {
|
||||
cmd: String, // expecting static "EVENT"
|
||||
event: Event,
|
||||
}
|
||||
|
||||
impl EventCmd {
|
||||
pub fn event_id(&self) -> &str {
|
||||
&self.event.id
|
||||
}
|
||||
}
|
||||
|
||||
/// Parsed nostr event.
|
||||
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
|
||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
|
||||
pub struct Event {
|
||||
pub id: String,
|
||||
pub(crate) pubkey: String,
|
||||
pub(crate) created_at: u64,
|
||||
pub(crate) kind: u64,
|
||||
pub pubkey: String,
|
||||
#[serde(skip)]
|
||||
pub delegated_by: Option<String>,
|
||||
pub created_at: u64,
|
||||
pub kind: u64,
|
||||
#[serde(deserialize_with = "tag_from_string")]
|
||||
// NOTE: array-of-arrays may need to be more general than a string container
|
||||
pub(crate) tags: Vec<Vec<String>>,
|
||||
pub(crate) content: String,
|
||||
pub(crate) sig: String,
|
||||
// Optimization for tag search, built on demand
|
||||
pub tags: Vec<Vec<String>>,
|
||||
pub content: String,
|
||||
pub sig: String,
|
||||
// Optimization for tag search, built on demand.
|
||||
#[serde(skip)]
|
||||
pub(crate) tagidx: Option<HashMap<String, HashSet<String>>>,
|
||||
pub tagidx: Option<HashMap<char, HashSet<String>>>,
|
||||
}
|
||||
|
||||
/// Simple tag type for array of array of strings.
|
||||
@@ -53,7 +61,26 @@ where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let opt = Option::deserialize(deserializer)?;
|
||||
Ok(opt.unwrap_or_else(Vec::new))
|
||||
Ok(opt.unwrap_or_default())
|
||||
}
|
||||
|
||||
/// Attempt to form a single-char tag name.
|
||||
pub fn single_char_tagname(tagname: &str) -> Option<char> {
|
||||
// We return the tag character if and only if the tagname consists
|
||||
// of a single char.
|
||||
let mut tagnamechars = tagname.chars();
|
||||
let firstchar = tagnamechars.next();
|
||||
match firstchar {
|
||||
Some(_) => {
|
||||
// check second char
|
||||
if tagnamechars.next().is_none() {
|
||||
firstchar
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
None => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert network event to parsed/validated event.
|
||||
@@ -62,17 +89,33 @@ impl From<EventCmd> for Result<Event> {
|
||||
// ensure command is correct
|
||||
if ec.cmd != "EVENT" {
|
||||
Err(CommandUnknownError)
|
||||
} else if ec.event.is_valid() {
|
||||
let mut e = ec.event;
|
||||
e.build_index();
|
||||
Ok(e)
|
||||
} else {
|
||||
Err(EventInvalid)
|
||||
ec.event.validate().map(|_| {
|
||||
let mut e = ec.event;
|
||||
e.build_index();
|
||||
e.update_delegation();
|
||||
e
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Event {
|
||||
#[cfg(test)]
|
||||
pub fn simple_event() -> Event {
|
||||
Event {
|
||||
id: "0".to_owned(),
|
||||
pubkey: "0".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 0,
|
||||
kind: 0,
|
||||
tags: vec![],
|
||||
content: "".to_owned(),
|
||||
sig: "0".to_owned(),
|
||||
tagidx: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_kind_metadata(&self) -> bool {
|
||||
self.kind == 0
|
||||
}
|
||||
@@ -92,6 +135,50 @@ impl Event {
|
||||
None
|
||||
}
|
||||
|
||||
// is this event delegated (properly)?
|
||||
// does the signature match, and are conditions valid?
|
||||
// if so, return an alternate author for the event
|
||||
pub fn delegated_author(&self) -> Option<String> {
|
||||
// is there a delegation tag?
|
||||
let delegation_tag: Vec<String> = self
|
||||
.tags
|
||||
.iter()
|
||||
.filter(|x| x.len() == 4)
|
||||
.filter(|x| x.get(0).unwrap() == "delegation")
|
||||
.take(1)
|
||||
.next()?
|
||||
.to_vec(); // get first tag
|
||||
|
||||
//let delegation_tag = self.tag_values_by_name("delegation");
|
||||
// delegation tags should have exactly 3 elements after the name (pubkey, condition, sig)
|
||||
// the event is signed by the delagatee
|
||||
let delegatee = &self.pubkey;
|
||||
// the delegation tag references the claimed delagator
|
||||
let delegator: &str = delegation_tag.get(1)?;
|
||||
let querystr: &str = delegation_tag.get(2)?;
|
||||
let sig: &str = delegation_tag.get(3)?;
|
||||
|
||||
// attempt to get a condition query; this requires the delegation to have a valid signature.
|
||||
if let Some(cond_query) = validate_delegation(delegator, delegatee, querystr, sig) {
|
||||
// The signature was valid, now we ensure the delegation
|
||||
// condition is valid for this event:
|
||||
if cond_query.allows_event(self) {
|
||||
// since this is allowed, we will provide the delegatee
|
||||
Some(delegator.into())
|
||||
} else {
|
||||
debug!("an event failed to satisfy delegation conditions");
|
||||
None
|
||||
}
|
||||
} else {
|
||||
debug!("event had had invalid delegation signature");
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
    /// Update delegation status
    fn update_delegation(&mut self) {
        self.delegated_by = self.delegated_author();
    }
    /// Build an event tag index
    fn build_index(&mut self) {
        // if there are no tags, just leave the index as None
@@ -99,18 +186,21 @@ impl Event {
            return;
        }
        // otherwise, build an index
        let mut idx: HashMap<String, HashSet<String>> = HashMap::new();
        let mut idx: HashMap<char, HashSet<String>> = HashMap::new();
        // iterate over tags that have at least 2 elements
        for t in self.tags.iter().filter(|x| x.len() > 1) {
            let tagname = t.get(0).unwrap();
            let tagnamechar_opt = single_char_tagname(tagname);
            if tagnamechar_opt.is_none() {
                continue;
            }
            let tagnamechar = tagnamechar_opt.unwrap();
            let tagval = t.get(1).unwrap();
            // ensure a vector exists for this tag
            if !idx.contains_key(tagname) {
                idx.insert(tagname.clone(), HashSet::new());
            }
            idx.entry(tagnamechar).or_insert_with(HashSet::new);
            // get the tag vec and insert entry
            let tidx = idx.get_mut(tagname).expect("could not get tag vector");
            tidx.insert(tagval.clone());
            let idx_tag_vec = idx.get_mut(&tagnamechar).expect("could not get tag vector");
            idx_tag_vec.insert(tagval.clone());
        }
        // save the tag structure
        self.tagidx = Some(idx);
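A sketch of the structure the rebuilt index holds after this change: keys are single-char tag names, values are sets of first tag values (example tags assumed for illustration):

use std::collections::{HashMap, HashSet};

// What build_index() produces for an event with tags
// [["e","foo"], ["e","baz"], ["p","abc"]] (placeholder values):
fn example_tagidx() -> HashMap<char, HashSet<String>> {
    let mut idx: HashMap<char, HashSet<String>> = HashMap::new();
    for (name, val) in [('e', "foo"), ('e', "baz"), ('p', "abc")] {
        idx.entry(name)
            .or_insert_with(HashSet::new)
            .insert(val.to_owned());
    }
    idx // idx[&'e'] == {"foo","baz"}, idx[&'p'] == {"abc"}
}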
@@ -124,7 +214,7 @@ impl Event {
        self.pubkey.chars().take(8).collect()
    }

    /// Retrieve tag values
    /// Retrieve tag initial values across all tags matching the name
    pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
        self.tags
            .iter()
@@ -134,13 +224,8 @@ impl Event {
            .collect()
    }

    /// Check if this event has a valid signature.
    fn is_valid(&self) -> bool {
        // TODO: return a Result with a reason for invalid events
        // don't bother to validate an event with a timestamp in the distant future.
        let config = config::SETTINGS.read().unwrap();
        let max_future_sec = config.options.reject_future_seconds;
        if let Some(allowable_future) = max_future_sec {
    pub fn is_valid_timestamp(&self, reject_future_seconds: Option<usize>) -> bool {
        if let Some(allowable_future) = reject_future_seconds {
            let curr_time = unix_time();
            // calculate difference, plus how far future we allow
            if curr_time + (allowable_future as u64) < self.created_at {
@@ -152,6 +237,12 @@ impl Event {
                return false;
            }
        }
        true
    }

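A quick check of the boundary arithmetic, with times assumed for illustration:

// Mirrors the comparison in is_valid_timestamp() (not the crate's code):
fn rejects_future(created_at: u64, curr_time: u64, allowable_future: u64) -> bool {
    curr_time + allowable_future < created_at
}
// rejects_future(1_600_001_801, 1_600_000_000, 1800) == true  (too far ahead)
// rejects_future(1_600_001_800, 1_600_000_000, 1800) == false (within allowance)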
    /// Check if this event has a valid signature.
    pub fn validate(&self) -> Result<()> {
        // TODO: return a Result with a reason for invalid events
        // validation is performed by:
        // * parsing JSON string into event fields
        // * create an array:
@@ -159,8 +250,8 @@ impl Event {
        // * serialize with no spaces/newlines
        let c_opt = self.to_canonical();
        if c_opt.is_none() {
            debug!("event could not be canonicalized");
            return false;
            debug!("could not canonicalize");
            return Err(EventCouldNotCanonicalize);
        }
        let c = c_opt.unwrap();
        // * compute the sha256sum.
@@ -169,22 +260,21 @@ impl Event {
        // * ensure the id matches the computed sha256sum.
        if self.id != hex_digest {
            debug!("event id does not match digest");
            return false;
            return Err(EventInvalidId);
        }
        // * validate the message digest (sig) using the pubkey & computed sha256 message hash.

        let sig = schnorr::Signature::from_str(&self.sig).unwrap();
        if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) {
            if let Ok(pubkey) = XOnlyPublicKey::from_str(&self.pubkey) {
                let verify = SECP.verify_schnorr(&sig, &msg, &pubkey);
                matches!(verify, Ok(()))
                SECP.verify_schnorr(&sig, &msg, &pubkey)
                    .map_err(|_| EventInvalidSignature)
            } else {
                debug!("client sent malformed pubkey");
                false
                Err(EventMalformedPubkey)
            }
        } else {
            info!("error converting digest to secp256k1 message");
            false
            Err(EventInvalidSignature)
        }
    }

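For reference, the canonical form being hashed is the NIP-01 array [0, pubkey, created_at, kind, tags, content], serialized with no extra whitespace; its sha256 digest, hex-encoded, must equal the event id. A sketch with placeholder field values (not the crate's to_canonical implementation):

use serde_json::json;

// Sketch of the canonical array to_canonical() is expected to produce:
fn canonical_example() -> String {
    json!([0, "<pubkey-hex>", 1612650459, 1, [["e", "foo"]], "hello"]).to_string()
    // => [0,"<pubkey-hex>",1612650459,1,[["e","foo"]],"hello"]
    // sha256 of this string is the event id
}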
@@ -226,9 +316,10 @@ impl Event {
    }

    /// Determine if the given tag and value set intersect with tags in this event.
    pub fn generic_tag_val_intersect(&self, tagname: &str, check: &HashSet<String>) -> bool {
    pub fn generic_tag_val_intersect(&self, tagname: char, check: &HashSet<String>) -> bool {
        match &self.tagidx {
            Some(idx) => match idx.get(tagname) {
            // check if this is an indexable tagname
            Some(idx) => match idx.get(&tagname) {
                Some(valset) => {
                    let common = valset.intersection(check);
                    common.count() > 0
@@ -243,62 +334,48 @@ impl Event {
#[cfg(test)]
mod tests {
    use super::*;
    fn simple_event() -> Event {
        Event {
            id: "0".to_owned(),
            pubkey: "0".to_owned(),
            created_at: 0,
            kind: 0,
            tags: vec![],
            content: "".to_owned(),
            sig: "0".to_owned(),
            tagidx: None,
        }
    }

    #[test]
    fn event_creation() {
        // create an event
        let event = simple_event();
        let event = Event::simple_event();
        assert_eq!(event.id, "0");
    }

    #[test]
    fn event_serialize() -> Result<()> {
        // serialize an event to JSON string
        let event = simple_event();
        let event = Event::simple_event();
        let j = serde_json::to_string(&event)?;
        assert_eq!(j, "{\"id\":\"0\",\"pubkey\":\"0\",\"created_at\":0,\"kind\":0,\"tags\":[],\"content\":\"\",\"sig\":\"0\"}");
        Ok(())
    }

    #[test]
    fn empty_event_tag_match() -> Result<()> {
        let event = simple_event();
    fn empty_event_tag_match() {
        let event = Event::simple_event();
        assert!(!event
            .generic_tag_val_intersect("e", &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
        Ok(())
            .generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
    }

    #[test]
    fn single_event_tag_match() -> Result<()> {
        let mut event = simple_event();
    fn single_event_tag_match() {
        let mut event = Event::simple_event();
        event.tags = vec![vec!["e".to_owned(), "foo".to_owned()]];
        event.build_index();
        assert_eq!(
            event.generic_tag_val_intersect(
                "e",
                'e',
                &HashSet::from(["foo".to_owned(), "bar".to_owned()])
            ),
            true
        );
        Ok(())
    }

    #[test]
    fn event_tags_serialize() -> Result<()> {
        // serialize an event with tags to JSON string
        let mut event = simple_event();
        let mut event = Event::simple_event();
        event.tags = vec![
            vec![
                "e".to_owned(),
@@ -330,6 +407,7 @@ mod tests {
        let e = Event {
            id: "999".to_owned(),
            pubkey: "012345".to_owned(),
            delegated_by: None,
            created_at: 501234,
            kind: 1,
            tags: vec![],
@@ -347,6 +425,7 @@ mod tests {
        let e = Event {
            id: "999".to_owned(),
            pubkey: "012345".to_owned(),
            delegated_by: None,
            created_at: 501234,
            kind: 1,
            tags: vec![
@@ -368,11 +447,39 @@ mod tests {
        assert_eq!(v, vec!["foo", "bar", "baz"]);
    }

    #[test]
    fn event_no_tag_select() {
        let e = Event {
            id: "999".to_owned(),
            pubkey: "012345".to_owned(),
            delegated_by: None,
            created_at: 501234,
            kind: 1,
            tags: vec![
                vec!["j".to_owned(), "abc".to_owned()],
                vec!["e".to_owned(), "foo".to_owned()],
                vec!["e".to_owned(), "baz".to_owned()],
                vec![
                    "p".to_owned(),
                    "aaaa".to_owned(),
                    "ws://example.com".to_owned(),
                ],
            ],
            content: "this is a test".to_owned(),
            sig: "abcde".to_owned(),
            tagidx: None,
        };
        let v = e.tag_values_by_name("x");
        // asking for tags that don't exist just returns zero-length vector
        assert_eq!(v.len(), 0);
    }

    #[test]
    fn event_canonical_with_tags() {
        let e = Event {
            id: "999".to_owned(),
            pubkey: "012345".to_owned(),
            delegated_by: None,
            created_at: 501234,
            kind: 1,
            tags: vec![

@@ -3,7 +3,7 @@ use crate::utils::is_hex;
use hex;

/// Types of hexadecimal queries.
#[derive(PartialEq, Debug, Clone)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)]
pub enum HexSearch {
    // when no range is needed, exact 32-byte
    Exact(Vec<u8>),
@@ -60,11 +60,10 @@ pub fn hex_range(s: &str) -> Option<HexSearch> {
            upper[byte_len] = b + 16; // bump up the first character in this byte
            // increment done, stop iterating through the vec
            break;
        } else {
            // if it is 'f', reset the byte to 0 and do a carry
            // reset and carry
            upper[byte_len] = 0;
        }
        // if it is 'f', reset the byte to 0 and do a carry
        // reset and carry
        upper[byte_len] = 0;
        // done with odd logic, so don't repeat this
        odd = false;
    } else {

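For orientation, hex_range turns a hex prefix into scan bounds, and an odd-length prefix bumps the final nibble. A minimal sketch of the intended semantics (not the crate's exact code; the 'f'-nibble case is what the reset-and-carry loop above handles):

fn odd_prefix_bounds(even_bytes: &[u8], last_nibble: u8) -> (Vec<u8>, Vec<u8>) {
    assert!(last_nibble < 0x0f, "carry case handled separately");
    let mut lower = even_bytes.to_vec();
    lower.push(last_nibble << 4); // "abc" -> lower [0xab, 0xc0]
    let mut upper = even_bytes.to_vec();
    upper.push((last_nibble << 4) + 0x10); // -> upper [0xab, 0xd0]
    (lower, upper)
}
// A scan for ids in [lower, upper) matches every id starting with "abc".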
@@ -35,7 +35,7 @@ impl From<config::Info> for RelayInfo {
            description: i.description,
            pubkey: i.pubkey,
            contact: i.contact,
            supported_nips: Some(vec![1, 2, 11]),
            supported_nips: Some(vec![1, 2, 9, 11, 12, 15, 16, 20, 22, 26]),
            software: Some("https://git.sr.ht/~gheartsfield/nostr-rs-relay".to_owned()),
            version: CARGO_PKG_VERSION.map(|x| x.to_owned()),
        }

@@ -2,11 +2,15 @@ pub mod close;
pub mod config;
pub mod conn;
pub mod db;
pub mod delegation;
pub mod error;
pub mod event;
pub mod hexrange;
pub mod info;
pub mod nip05;
pub mod notice;
pub mod schema;
pub mod subscription;
pub mod utils;
// Public API for creating relays programmatically
pub mod server;
600 src/main.rs
@@ -1,583 +1,51 @@
//! Server process
use futures::SinkExt;
use futures::StreamExt;
use hyper::header::ACCEPT;
use hyper::service::{make_service_fn, service_fn};
use hyper::upgrade::Upgraded;
use hyper::{
    header, server::conn::AddrStream, upgrade, Body, Request, Response, Server, StatusCode,
};
use log::*;
use nostr_rs_relay::close::Close;
use nostr_rs_relay::close::CloseCmd;

use nostr_rs_relay::config;
use nostr_rs_relay::conn;
use nostr_rs_relay::db;
use nostr_rs_relay::db::SubmittedEvent;
use nostr_rs_relay::error::{Error, Result};
use nostr_rs_relay::event::Event;
use nostr_rs_relay::event::EventCmd;
use nostr_rs_relay::info::RelayInfo;
use nostr_rs_relay::nip05;
use nostr_rs_relay::subscription::Subscription;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::collections::HashMap;
use std::convert::Infallible;
use nostr_rs_relay::server::start_server;
use std::env;
use std::net::SocketAddr;
use std::path::Path;
use std::time::Duration;
use std::time::Instant;
use tokio::runtime::Builder;
use tokio::sync::broadcast::{self, Receiver, Sender};
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio_tungstenite::WebSocketStream;
use tungstenite::error::Error as WsError;
use tungstenite::handshake;
use tungstenite::protocol::Message;
use tungstenite::protocol::WebSocketConfig;
use std::sync::mpsc as syncmpsc;
use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
use std::thread;
use tracing::info;

use console_subscriber::ConsoleLayer;

/// Return a requested DB name from command line arguments.
fn db_from_args(args: Vec<String>) -> Option<String> {
fn db_from_args(args: &[String]) -> Option<String> {
    if args.len() == 3 && args.get(1) == Some(&"--db".to_owned()) {
        return args.get(2).map(|x| x.to_owned());
        return args.get(2).map(std::clone::Clone::clone);
    }
    None
}

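A usage sketch for the flag this parses, with a hypothetical path:

#[test]
fn db_flag_parsing() {
    // hypothetical invocation: ./nostr-rs-relay --db /var/lib/nostr
    let args = vec![
        "./nostr-rs-relay".to_owned(),
        "--db".to_owned(),
        "/var/lib/nostr".to_owned(),
    ];
    assert_eq!(db_from_args(&args), Some("/var/lib/nostr".to_owned()));
}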
/// Handle arbitrary HTTP requests, including for WebSocket upgrades.
async fn handle_web_request(
    mut request: Request<Body>,
    pool: db::SqlitePool,
    remote_addr: SocketAddr,
    broadcast: Sender<Event>,
    event_tx: tokio::sync::mpsc::Sender<SubmittedEvent>,
    shutdown: Receiver<()>,
) -> Result<Response<Body>, Infallible> {
    match (
        request.uri().path(),
        request.headers().contains_key(header::UPGRADE),
    ) {
        // Request for / as websocket
        ("/", true) => {
            trace!("websocket with upgrade request");
            //assume request is a handshake, so create the handshake response
            let response = match handshake::server::create_response_with_body(&request, || {
                Body::empty()
            }) {
                Ok(response) => {
                    //in case the handshake response creation succeeds,
                    //spawn a task to handle the websocket connection
                    tokio::spawn(async move {
                        //using the hyper feature of upgrading a connection
                        match upgrade::on(&mut request).await {
                            //if successfully upgraded
                            Ok(upgraded) => {
                                // set WebSocket configuration options
                                let mut config = WebSocketConfig::default();
                                {
                                    let settings = config::SETTINGS.read().unwrap();
                                    config.max_message_size = settings.limits.max_ws_message_bytes;
                                    config.max_frame_size = settings.limits.max_ws_frame_bytes;
                                }
                                //create a websocket stream from the upgraded object
                                let ws_stream = WebSocketStream::from_raw_socket(
                                    //pass the upgraded object
                                    //as the base layer stream of the Websocket
                                    upgraded,
                                    tokio_tungstenite::tungstenite::protocol::Role::Server,
                                    Some(config),
                                )
                                .await;

                                tokio::spawn(nostr_server(
                                    pool, ws_stream, broadcast, event_tx, shutdown,
                                ));
                            }
                            Err(e) => println!(
                                "error when trying to upgrade connection \
                                 from address {} to websocket connection. \
                                 Error is: {}",
                                remote_addr, e
                            ),
                        }
                    });
                    //return the response to the handshake request
                    response
                }
                Err(error) => {
                    warn!("websocket response failed");
                    let mut res =
                        Response::new(Body::from(format!("Failed to create websocket: {}", error)));
                    *res.status_mut() = StatusCode::BAD_REQUEST;
                    return Ok(res);
                }
            };
            Ok::<_, Infallible>(response)
        }
        // Request for Relay info
        ("/", false) => {
            // handle request at root with no upgrade header
            // Check if this is a nostr server info request
            let accept_header = &request.headers().get(ACCEPT);
            // check if application/nostr+json is included
            if let Some(media_types) = accept_header {
                if let Ok(mt_str) = media_types.to_str() {
                    if mt_str.contains("application/nostr+json") {
                        let config = config::SETTINGS.read().unwrap();
                        // build a relay info response
                        debug!("Responding to server info request");
                        let rinfo = RelayInfo::from(config.info.clone());
                        let b = Body::from(serde_json::to_string_pretty(&rinfo).unwrap());
                        return Ok(Response::builder()
                            .status(200)
                            .header("Content-Type", "application/nostr+json")
                            .body(b)
                            .unwrap());
                    }
                }
            }
            Ok(Response::new(Body::from(
                "Please use a Nostr client to connect.",
            )))
        }
        (_, _) => {
            //handle any other url
            Ok(Response::builder()
                .status(StatusCode::NOT_FOUND)
                .body(Body::from("Nothing here."))
                .unwrap())
        }
    }
}

async fn shutdown_signal() {
    // Wait for the CTRL+C signal
    tokio::signal::ctrl_c()
        .await
        .expect("failed to install CTRL+C signal handler");
}

/// Start running a Nostr relay server.
fn main() -> Result<(), Error> {
    // setup logger
    let _ = env_logger::try_init();
fn main() {
    // setup tracing
    let _trace_sub = tracing_subscriber::fmt::try_init();
    info!("Starting up from main");
    // get database directory from args
    let args: Vec<String> = env::args().collect();
    let db_dir: Option<String> = db_from_args(args);
    {
        let mut settings = config::SETTINGS.write().unwrap();
        // replace default settings with those read from config.toml
        let mut c = config::Settings::new();
        // update with database location
        if let Some(db) = db_dir {
            c.database.data_directory = db;
        }
        *settings = c;
    let db_dir: Option<String> = db_from_args(&args);
    // configure settings from config.toml
    // replace default settings with those read from config.toml
    let mut settings = config::Settings::new();

    if settings.diagnostics.tracing {
        // enable tracing with tokio-console
        ConsoleLayer::builder().with_default_env().init();
    }
    // update with database location
    if let Some(db) = db_dir {
        settings.database.data_directory = db;
    }

    let settings = config::SETTINGS.read().unwrap();
    trace!("Config: {:?}", settings);
    // do some config validation.
    if !Path::new(&settings.database.data_directory).is_dir() {
        error!("Database directory does not exist");
        return Err(Error::DatabaseDirError);
    }
    let addr = format!(
        "{}:{}",
        settings.network.address.trim(),
        settings.network.port
    );
    let socket_addr = addr.parse().expect("listening address not valid");
    // address whitelisting settings
    if let Some(addr_whitelist) = &settings.authorization.pubkey_whitelist {
        info!(
            "Event publishing restricted to {} pubkey(s)",
            addr_whitelist.len()
        );
    }
    // check if NIP-05 enforced user verification is on
    if settings.verified_users.is_active() {
        info!(
            "NIP-05 user verification mode:{:?}",
            settings.verified_users.mode
        );
        if let Some(d) = settings.verified_users.verify_update_duration() {
            info!("NIP-05 check user verification every: {:?}", d);
        }
        if let Some(d) = settings.verified_users.verify_expiration_duration() {
            info!("NIP-05 user verification expires after: {:?}", d);
        }
        if let Some(wl) = &settings.verified_users.domain_whitelist {
            info!("NIP-05 domain whitelist: {:?}", wl);
        }
        if let Some(bl) = &settings.verified_users.domain_blacklist {
            info!("NIP-05 domain blacklist: {:?}", bl);
        }
    }
    // configure tokio runtime
    let rt = Builder::new_multi_thread()
        .enable_all()
        .thread_name("tokio-ws")
        .build()
        .unwrap();
    // start tokio
    rt.block_on(async {
        let settings = config::SETTINGS.read().unwrap();
        info!("listening on: {}", socket_addr);
        // all client-submitted valid events are broadcast to every
        // other client on this channel. This should be large enough
        // to accommodate slower readers (messages are dropped if
        // clients cannot keep up).
        let (bcast_tx, _) = broadcast::channel::<Event>(settings.limits.broadcast_buffer);
        // validated events that need to be persisted are sent to the
        // database via this channel.
        let (event_tx, event_rx) =
            mpsc::channel::<SubmittedEvent>(settings.limits.event_persist_buffer);
        // establish a channel for letting all threads know about a
        // requested server shutdown.
        let (invoke_shutdown, shutdown_listen) = broadcast::channel::<()>(1);
        // create a channel for sending any new metadata event. These
        // will get processed relatively slowly (a potentially
        // multi-second blocking HTTP call) on a single thread, so we
        // buffer requests on the channel. No harm in dropping events
        // here, since we are protecting against DoS. This can make
        // it difficult to set up initial metadata in bulk, since
        // overwhelming this will drop events and won't register
        // metadata events.
        let (metadata_tx, metadata_rx) = broadcast::channel::<Event>(4096);
        // start the database writer thread. Give it a channel for
        // writing events, and for publishing events that have been
        // written (to all connected clients).
        db::db_writer(
            event_rx,
            bcast_tx.clone(),
            metadata_tx.clone(),
            shutdown_listen,
        )
        .await;
        info!("db writer created");

        // create a nip-05 verifier thread
        let verifier_opt = nip05::Verifier::new(metadata_rx, bcast_tx.clone());
        if let Ok(mut v) = verifier_opt {
            if settings.verified_users.is_active() {
                tokio::task::spawn(async move {
                    info!("starting up NIP-05 verifier...");
                    v.run().await;
                });
            }
        }
        // listen for ctrl-c interrupts
        let ctrl_c_shutdown = invoke_shutdown.clone();
        tokio::spawn(async move {
            tokio::signal::ctrl_c().await.unwrap();
            info!("shutting down due to SIGINT");
            ctrl_c_shutdown.send(()).ok();
        });
        // build a connection pool for sqlite connections
        let pool = db::build_pool(
            "client query",
            rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY
                | rusqlite::OpenFlags::SQLITE_OPEN_SHARED_CACHE,
            settings.database.min_conn,
            settings.database.max_conn,
            true,
        );
        // A `Service` is needed for every connection, so this
        // creates one from our `handle_request` function.
        let make_svc = make_service_fn(|conn: &AddrStream| {
            let svc_pool = pool.clone();
            let remote_addr = conn.remote_addr();
            let bcast = bcast_tx.clone();
            let event = event_tx.clone();
            let stop = invoke_shutdown.clone();
            async move {
                // service_fn converts our function into a `Service`
                Ok::<_, Infallible>(service_fn(move |request: Request<Body>| {
                    handle_web_request(
                        request,
                        svc_pool.clone(),
                        remote_addr,
                        bcast.clone(),
                        event.clone(),
                        stop.subscribe(),
                    )
                }))
            }
        });
        let server = Server::bind(&socket_addr)
            .serve(make_svc)
            .with_graceful_shutdown(shutdown_signal());
        // run hyper
        if let Err(e) = server.await {
            eprintln!("server error: {}", e);
        }
    // our code
    let (_, ctrl_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
    // run this in a new thread
    let handle = thread::spawn(|| {
        // we should have a 'control plane' channel to monitor and bump the server.
        // this will let us do stuff like clear the database, shutdown, etc.
        let _svr = start_server(settings, ctrl_rx);
    });
    Ok(())
}

/// Nostr protocol messages from a client
#[derive(Deserialize, Serialize, Clone, PartialEq, Debug)]
#[serde(untagged)]
pub enum NostrMessage {
    /// An `EVENT` message
    EventMsg(EventCmd),
    /// A `REQ` message
    SubMsg(Subscription),
    /// A `CLOSE` message
    CloseMsg(CloseCmd),
}

/// Convert Message to NostrMessage
fn convert_to_msg(msg: String) -> Result<NostrMessage> {
    let config = config::SETTINGS.read().unwrap();
    let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
    match parsed_res {
        Ok(m) => {
            if let NostrMessage::EventMsg(_) = m {
                if let Some(max_size) = config.limits.max_event_bytes {
                    // check length, ensure that some max size is set.
                    if msg.len() > max_size && max_size > 0 {
                        return Err(Error::EventMaxLengthError(msg.len()));
                    }
                }
            }
            Ok(m)
        }
        Err(e) => {
            debug!("proto parse error: {:?}", e);
            debug!("parse error on message: {}", msg.trim());
            Err(Error::ProtoParseError)
        }
    }
}

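For reference, the untagged enum matches the three NIP-01 client message shapes. A sketch with placeholder ids and abbreviated event fields; serde tries each variant in order:

// Client message shapes convert_to_msg() accepts (placeholder values):
let examples = [
    r#"["EVENT", {"id":"<id>","pubkey":"<pk>","created_at":0,"kind":1,"tags":[],"content":"hi","sig":"<sig>"}]"#, // EventMsg
    r#"["REQ", "sub-id", {"kinds":[1], "limit":10}]"#, // SubMsg
    r#"["CLOSE", "sub-id"]"#,                          // CloseMsg
];
for msg in examples {
    // each should parse into the matching NostrMessage variant
    let _parsed = convert_to_msg(msg.to_owned());
}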
/// Turn a string into a NOTICE message ready to send over a WebSocket
fn make_notice_message(msg: &str) -> Message {
    Message::text(json!(["NOTICE", msg]).to_string())
}

/// Handle new client connections. This runs through an event loop
/// for all client communication.
async fn nostr_server(
    pool: db::SqlitePool,
    mut ws_stream: WebSocketStream<Upgraded>,
    broadcast: Sender<Event>,
    event_tx: mpsc::Sender<SubmittedEvent>,
    mut shutdown: Receiver<()>,
) {
    // get a broadcast channel for clients to communicate on
    let mut bcast_rx = broadcast.subscribe();
    // Track internal client state
    let mut conn = conn::ClientConn::new();
    let cid = conn.get_client_prefix();
    // Create a channel for receiving query results from the database.
    // we will send out the tx handle to any query we generate.
    let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(256);
    // Create channel for receiving NOTICEs
    let (notice_tx, mut notice_rx) = mpsc::channel::<String>(32);

    // last time this client sent data (message, ping, etc.)
    let mut last_message_time = Instant::now();

    // ping interval (every 5 minutes)
    let default_ping_dur = Duration::from_secs(300);

    // disconnect after 20 minutes without a ping response or event.
    let max_quiet_time = Duration::from_secs(60 * 20);

    let start = tokio::time::Instant::now() + default_ping_dur;
    let mut ping_interval = tokio::time::interval_at(start, default_ping_dur);

    // maintain a hashmap of oneshot channels for active subscriptions.
    // when these subscriptions are cancelled, make a message
    // available to the executing query so it knows to stop.
    let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();

    // for stats, keep track of how many events the client published,
    // and how many it received from queries.
    let mut client_published_event_count: usize = 0;
    let mut client_received_event_count: usize = 0;
    info!("new connection for client: {:?}", cid);
    loop {
        tokio::select! {
            _ = shutdown.recv() => {
                // server shutting down, exit loop
                break;
            },
            _ = ping_interval.tick() => {
                // check how long since we talked to client
                // if it has been too long, disconnect
                if last_message_time.elapsed() > max_quiet_time {
                    debug!("ending connection due to lack of client ping response");
                    break;
                }
                // Send a ping
                ws_stream.send(Message::Ping(Vec::new())).await.ok();
            },
            Some(notice_msg) = notice_rx.recv() => {
                ws_stream.send(make_notice_message(&notice_msg)).await.ok();
            },
            Some(query_result) = query_rx.recv() => {
                // database informed us of a query result we asked for
                client_received_event_count += 1;
                // send a result
                let subesc = query_result.sub_id.replace("\"", "");
                let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
                ws_stream.send(Message::Text(send_str)).await.ok();
            },
            // TODO: consider logging the LaggedRecv error
            Ok(global_event) = bcast_rx.recv() => {
                // an event has been broadcast to all clients
                // first check if there is a subscription for this event.
                let matching_subs = conn.get_matching_subscriptions(&global_event);
                for s in matching_subs {
                    // TODO: serialize at broadcast time, instead of
                    // once for each consumer.
                    if let Ok(event_str) = serde_json::to_string(&global_event) {
                        debug!("sub match: client: {:?}, sub: {:?}, event: {:?}",
                               cid, s,
                               global_event.get_event_id_prefix());
                        // create an event response and send it
                        let subesc = s.replace("\"", "");
                        ws_stream.send(Message::Text(format!("[\"EVENT\",\"{}\",{}]", subesc, event_str))).await.ok();
                        //nostr_stream.send(res).await.ok();
                    } else {
                        warn!("could not serialize event {:?}", global_event.get_event_id_prefix());
                    }
                }
            },
            ws_next = ws_stream.next() => {
                // update most recent message time for client
                last_message_time = Instant::now();
                // Consume text messages from the client, parse into Nostr messages.
                let nostr_msg = match ws_next {
                    Some(Ok(Message::Text(m))) => {
                        convert_to_msg(m)
                    },
                    Some(Ok(Message::Binary(_))) => {
                        ws_stream.send(make_notice_message("binary messages are not accepted")).await.ok();
                        continue;
                    },
                    Some(Ok(Message::Ping(_))) | Some(Ok(Message::Pong(_))) => {
                        // get a ping/pong, ignore. tungstenite will
                        // send responses automatically.
                        continue;
                    },
                    None |
                    Some(Ok(Message::Close(_))) |
                    Some(Err(WsError::AlreadyClosed)) |
                    Some(Err(WsError::ConnectionClosed)) |
                    Some(Err(WsError::Protocol(tungstenite::error::ProtocolError::ResetWithoutClosingHandshake)))
                        => {
                        debug!("websocket close from client: {:?}", cid);
                        break;
                    },
                    Some(Err(WsError::Io(e))) => {
                        // IO errors are considered fatal
                        warn!("IO error (client: {:?}): {:?}", cid, e);
                        break;
                    }
                    x => {
                        // default condition on error is to close the client connection
                        info!("unknown error (client: {:?}): {:?} (closing conn)", cid, x);
                        break;
                    }
                };

                // convert ws_next into proto_next
                match nostr_msg {
                    Ok(NostrMessage::EventMsg(ec)) => {
                        // An EventCmd needs to be validated to be converted into an Event
                        // handle each type of message
                        let parsed : Result<Event> = Result::<Event>::from(ec);
                        match parsed {
                            Ok(e) => {
                                let id_prefix: String = e.id.chars().take(8).collect();
                                debug!("successfully parsed/validated event: {:?} from client: {:?}", id_prefix, cid);
                                // Write this to the database.
                                let submit_event = SubmittedEvent { event: e.clone(), notice_tx: notice_tx.clone() };
                                event_tx.send(submit_event).await.ok();
                                client_published_event_count += 1;
                            },
                            Err(_) => {
                                info!("client {:?} sent an invalid event", cid);
                                ws_stream.send(make_notice_message("event was invalid")).await.ok();
                            }
                        }
                    },
                    Ok(NostrMessage::SubMsg(s)) => {
                        debug!("client {} requesting a subscription", cid);
                        // subscription handling consists of:
                        // * registering the subscription so future events can be matched
                        // * making a channel to cancel the request later
                        // * sending a request for a SQL query
                        let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
                        match conn.subscribe(s.clone()) {
                            Ok(()) => {
                                // when we insert, if there was a previous query running with the same name, cancel it.
                                if let Some(previous_query) = running_queries.insert(s.id.to_owned(), abandon_query_tx) {
                                    previous_query.send(()).ok();
                                }
                                // start a database query
                                db::db_query(s, pool.clone(), query_tx.clone(), abandon_query_rx).await;
                            },
                            Err(e) => {
                                info!("Subscription error: {}", e);
                                ws_stream.send(make_notice_message(&e.to_string())).await.ok();
                            }
                        }
                    },
                    Ok(NostrMessage::CloseMsg(cc)) => {
                        // closing a request simply removes the subscription.
                        let parsed : Result<Close> = Result::<Close>::from(cc);
                        match parsed {
                            Ok(c) => {
                                // check if a query is currently
                                // running, and remove it if so.
                                let stop_tx = running_queries.remove(&c.id);
                                if let Some(tx) = stop_tx {
                                    tx.send(()).ok();
                                }
                                // stop checking new events against
                                // the subscription
                                conn.unsubscribe(c);
                            },
                            Err(_) => {
                                info!("invalid command ignored");
                                ws_stream.send(make_notice_message("could not parse command")).await.ok();
                            }
                        }
                    },
                    Err(Error::ConnError) => {
                        debug!("got connection close/error, disconnecting client: {:?}", cid);
                        break;
                    }
                    Err(Error::EventMaxLengthError(s)) => {
                        info!("client {:?} sent event larger ({} bytes) than max size", cid, s);
                        ws_stream.send(make_notice_message("event exceeded max size")).await.ok();
                    },
                    Err(Error::ProtoParseError) => {
                        info!("client {:?} sent event that could not be parsed", cid);
                        ws_stream.send(make_notice_message("could not parse command")).await.ok();
                    },
                    Err(e) => {
                        info!("got non-fatal error from client: {:?}, error: {:?}", cid, e);
                    },
                }
            },
        }
    }
    // connection cleanup - ensure any still running queries are terminated.
    for (_, stop_tx) in running_queries.into_iter() {
        stop_tx.send(()).ok();
    }
    info!(
        "stopping connection for client: {:?} (client sent {} event(s), received {})",
        cid, client_published_event_count, client_received_event_count
    );
    // block on nostr thread to finish.
    handle.join().unwrap();
}

84 src/nip05.rs
@@ -4,7 +4,7 @@
//! address with their public key, in metadata events. This module
//! consumes a stream of metadata events, and keeps a database table
//! updated with the current NIP-05 verification status.
use crate::config::SETTINGS;
use crate::config::VerifiedUsers;
use crate::db;
use crate::error::{Error, Result};
use crate::event::Event;
@@ -13,13 +13,13 @@ use hyper::body::HttpBody;
use hyper::client::connect::HttpConnector;
use hyper::Client;
use hyper_tls::HttpsConnector;
use log::*;
use rand::Rng;
use rusqlite::params;
use std::time::Duration;
use std::time::Instant;
use std::time::SystemTime;
use tokio::time::Interval;
use tracing::{debug, info, warn};

/// NIP-05 verifier state
pub struct Verifier {
@@ -31,6 +31,8 @@ pub struct Verifier {
    read_pool: db::SqlitePool,
    /// SQLite write query pool
    write_pool: db::SqlitePool,
    /// Settings
    settings: crate::config::Settings,
    /// HTTP client
    client: hyper::Client<HttpsConnector<HttpConnector>, hyper::Body>,
    /// After all accounts are updated, wait this long before checking again.
@@ -42,7 +44,7 @@ pub struct Verifier {
}

/// A NIP-05 identifier is a local part and domain.
#[derive(PartialEq, Debug, Clone)]
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct Nip05Name {
    local: String,
    domain: String,
@@ -138,11 +140,13 @@ impl Verifier {
    pub fn new(
        metadata_rx: tokio::sync::broadcast::Receiver<Event>,
        event_tx: tokio::sync::broadcast::Sender<Event>,
        settings: crate::config::Settings,
    ) -> Result<Self> {
        info!("creating NIP-05 verifier");
        // build a database connection for reading and writing.
        let write_pool = db::build_pool(
            "nip05 writer",
            &settings,
            rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE,
            1, // min conns
            4, // max conns
@@ -150,6 +154,7 @@ impl Verifier {
        );
        let read_pool = db::build_pool(
            "nip05 reader",
            &settings,
            rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY,
            1, // min conns
            8, // max conns
@@ -174,6 +179,7 @@ impl Verifier {
            event_tx,
            read_pool,
            write_pool,
            settings,
            client,
            wait_after_finish,
            http_wait_duration,
@@ -214,7 +220,11 @@ impl Verifier {
        pubkey: &str,
    ) -> Result<UserWebVerificationStatus> {
        // determine if this domain should be checked
        if !is_domain_allowed(&nip.domain) {
        if !is_domain_allowed(
            &nip.domain,
            &self.settings.verified_users.domain_whitelist,
            &self.settings.verified_users.domain_blacklist,
        ) {
            return Ok(UserWebVerificationStatus::DomainNotAllowed);
        }
        let url = nip
@@ -239,9 +249,9 @@ impl Verifier {
        // HTTP request with timeout
        match tokio::time::timeout(Duration::from_secs(5), response_fut).await {
            Ok(response_res) => {
                let response = response_res?;
                // limit size of verification document to 1MB.
                const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024 * 1024;
                let response = response_res?;
                // determine content length from response
                let response_content_length = match response.body().size_hint().upper() {
                    Some(v) => v,
@@ -257,12 +267,11 @@ impl Verifier {
                let body_matches = body_contains_user(&nip.local, pubkey, body_bytes)?;
                if body_matches {
                    return Ok(UserWebVerificationStatus::Verified);
                } else {
                    // successful response, parsed as a nip-05
                    // document, but this name/pubkey was not
                    // present.
                    return Ok(UserWebVerificationStatus::Unverified);
                }
                // successful response, parsed as a nip-05
                // document, but this name/pubkey was not
                // present.
                return Ok(UserWebVerificationStatus::Unverified);
            }
        } else {
            info!(
@@ -347,15 +356,11 @@ impl Verifier {

    /// Reverify the oldest user verification record.
    async fn do_reverify(&mut self) -> Result<()> {
        let reverify_setting;
        let max_failures;
        {
            // this block prevents a read handle to settings being
            // captured by the async DB call (guard is not Send)
            let settings = SETTINGS.read().unwrap();
            reverify_setting = settings.verified_users.verify_update_frequency_duration;
            max_failures = settings.verified_users.max_consecutive_failures;
        }
        let reverify_setting = self
            .settings
            .verified_users
            .verify_update_frequency_duration;
        let max_failures = self.settings.verified_users.max_consecutive_failures;
        // get from settings, but default to 6hrs between re-checking an account
        let reverify_dur = reverify_setting.unwrap_or_else(|| Duration::from_secs(60 * 60 * 6));
        // find all verification records that have success or failure OLDER than the reverify_dur.
@@ -506,17 +511,13 @@ impl Verifier {
        let start = Instant::now();
        // we should only do this if we are enabled. if we are
        // disabled/passive, the event has already been persisted.
        let should_write_event;
        {
            let settings = SETTINGS.read().unwrap();
            should_write_event = settings.verified_users.is_enabled()
        }
        let should_write_event = self.settings.verified_users.is_enabled();
        if should_write_event {
            match db::write_event(&mut self.write_pool.get()?, event) {
                Ok(updated) => {
                    if updated != 0 {
                        info!(
                            "persisted event: {:?} in {:?}",
                            "persisted event (new verified pubkey): {:?} in {:?}",
                            event.get_event_id_prefix(),
                            start.elapsed()
                        );
@@ -538,7 +539,7 @@ impl Verifier {
}

/// Result of checking user's verification status against DNS/HTTP.
#[derive(PartialEq, Debug, Clone)]
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum UserWebVerificationStatus {
    Verified, // user is verified, as of now.
    DomainNotAllowed, // domain blacklist or whitelist denied us from attempting a verification
@@ -547,7 +548,7 @@ pub enum UserWebVerificationStatus {
}

/// A NIP-05 verification record.
#[derive(PartialEq, Debug, Clone)]
#[derive(PartialEq, Eq, Debug, Clone)]
// Basic information for a verification event. Gives us all we need to assert a NIP-05 address is good.
pub struct VerificationRecord {
    pub rowid: u64, // database row for this verification event
@@ -562,15 +563,18 @@ pub struct VerificationRecord {

/// Check with settings to determine if a given domain is allowed to
/// publish.
pub fn is_domain_allowed(domain: &str) -> bool {
    let settings = SETTINGS.read().unwrap();
pub fn is_domain_allowed(
    domain: &str,
    whitelist: &Option<Vec<String>>,
    blacklist: &Option<Vec<String>>,
) -> bool {
    // if there is a whitelist, domain must be present in it.
    if let Some(wl) = &settings.verified_users.domain_whitelist {
    if let Some(wl) = whitelist {
        // workaround for Vec contains not accepting &str
        return wl.iter().any(|x| x == domain);
    }
    // otherwise, check that user is not in the blacklist
    if let Some(bl) = &settings.verified_users.domain_blacklist {
    if let Some(bl) = blacklist {
        return !bl.iter().any(|x| x == domain);
    }
    true
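A quick sketch of the precedence this gives (whitelist wins; otherwise the blacklist filters; otherwise allow), with hypothetical domain lists:

let wl = Some(vec!["example.com".to_owned()]);
let bl = Some(vec!["spam.example".to_owned()]);
assert!(is_domain_allowed("example.com", &wl, &None));   // on the whitelist
assert!(!is_domain_allowed("other.org", &wl, &None));    // whitelist excludes everything else
assert!(!is_domain_allowed("spam.example", &None, &bl)); // blacklisted
assert!(is_domain_allowed("other.org", &None, &None));   // no lists: allowed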
@@ -579,17 +583,21 @@ pub fn is_domain_allowed(domain: &str) -> bool {
impl VerificationRecord {
    /// Check if the record is recent enough to be considered valid,
    /// and the domain is allowed.
    pub fn is_valid(&self) -> bool {
        let settings = SETTINGS.read().unwrap();
    pub fn is_valid(&self, verified_users_settings: &VerifiedUsers) -> bool {
        //let settings = SETTINGS.read().unwrap();
        // how long a verification record is good for
        let nip05_expiration = &settings.verified_users.verify_expiration_duration;
        let nip05_expiration = &verified_users_settings.verify_expiration_duration;
        if let Some(e) = nip05_expiration {
            if !self.is_current(e) {
                return false;
            }
        }
        // check domains
        is_domain_allowed(&self.name.domain)
        is_domain_allowed(
            &self.name.domain,
            &verified_users_settings.domain_whitelist,
            &verified_users_settings.domain_blacklist,
        )
    }

    /// Check if this record has been validated since the given
@@ -705,9 +713,7 @@ pub async fn get_oldest_user_verification(
    conn: db::PooledConnection,
    earliest: u64,
) -> Result<VerificationRecord> {
    let res =
        tokio::task::spawn_blocking(move || query_oldest_user_verification(conn, earliest)).await?;
    res
    tokio::task::spawn_blocking(move || query_oldest_user_verification(conn, earliest)).await?
}

pub fn query_oldest_user_verification(
@@ -715,7 +721,7 @@ pub fn query_oldest_user_verification(
    earliest: u64,
) -> Result<VerificationRecord> {
    let tx = conn.transaction()?;
    let query = "SELECT v.id, v.name, e.event_hash, e.author, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v LEFT JOIN event e ON e.id=v.metadata_event WHERE (v.verified_at < ? OR v.verified_at IS NULL) AND (v.failed_at < ? OR v.failed_at IS NULL) ORDER BY v.verified_at ASC, v.failed_at ASC LIMIT 1;";
    let query = "SELECT v.id, v.name, e.event_hash, e.author, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v INNER JOIN event e ON e.id=v.metadata_event WHERE (v.verified_at < ? OR v.verified_at IS NULL) AND (v.failed_at < ? OR v.failed_at IS NULL) ORDER BY v.verified_at ASC, v.failed_at ASC LIMIT 1;";
    let mut stmt = tx.prepare_cached(query)?;
    let fields = stmt.query_row(params![earliest, earliest], |r| {
        let rowid: u64 = r.get(0)?;

86 src/notice.rs (new file)
@@ -0,0 +1,86 @@
pub enum EventResultStatus {
    Saved,
    Duplicate,
    Invalid,
    Blocked,
    RateLimited,
    Error,
}

pub struct EventResult {
    pub id: String,
    pub msg: String,
    pub status: EventResultStatus,
}

pub enum Notice {
    Message(String),
    EventResult(EventResult),
}

impl EventResultStatus {
    pub fn to_bool(&self) -> bool {
        match self {
            Self::Saved => true,
            Self::Duplicate => true,
            Self::Invalid => false,
            Self::Blocked => false,
            Self::RateLimited => false,
            Self::Error => false,
        }
    }

    pub fn prefix(&self) -> &'static str {
        match self {
            Self::Saved => "saved",
            Self::Duplicate => "duplicate",
            Self::Invalid => "invalid",
            Self::Blocked => "blocked",
            Self::RateLimited => "rate-limited",
            Self::Error => "error",
        }
    }
}

impl Notice {
    //pub fn err(err: error::Error, id: String) -> Notice {
    //    Notice::err_msg(format!("{}", err), id)
    //}

    pub fn message(msg: String) -> Notice {
        Notice::Message(msg)
    }

    fn prefixed(id: String, msg: &str, status: EventResultStatus) -> Notice {
        let msg = format!("{}: {}", status.prefix(), msg);
        Notice::EventResult(EventResult { id, msg, status })
    }

    pub fn invalid(id: String, msg: &str) -> Notice {
        Notice::prefixed(id, msg, EventResultStatus::Invalid)
    }

    pub fn blocked(id: String, msg: &str) -> Notice {
        Notice::prefixed(id, msg, EventResultStatus::Blocked)
    }

    pub fn rate_limited(id: String, msg: &str) -> Notice {
        Notice::prefixed(id, msg, EventResultStatus::RateLimited)
    }

    pub fn duplicate(id: String) -> Notice {
        Notice::prefixed(id, "", EventResultStatus::Duplicate)
    }

    pub fn error(id: String, msg: &str) -> Notice {
        Notice::prefixed(id, msg, EventResultStatus::Error)
    }

    pub fn saved(id: String) -> Notice {
        Notice::EventResult(EventResult {
            id,
            msg: "".into(),
            status: EventResultStatus::Saved,
        })
    }
}
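These notices back NIP-20 command results: an EventResult maps to an ["OK", event_id, bool, message] frame on the wire. A sketch of that rendering; the actual serializer is not part of this file, so the helper below is an assumed illustration:

use serde_json::json;

// Hypothetical wire form for an EventResult under NIP-20 (placeholder id):
fn ok_frame(id: &str, status_ok: bool, msg: &str) -> String {
    json!(["OK", id, status_ok, msg]).to_string()
}
// ok_frame("<event-id>", true, "")                         => ["OK","<event-id>",true,""]
// ok_frame("<event-id>", false, "blocked: not on whitelist") for a rejected event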
436 src/schema.rs
@@ -1,30 +1,37 @@
//! Database schema and migrations
use crate::db::PooledConnection;
use crate::error::Result;
use crate::utils::is_hex;
use log::*;
use crate::event::{single_char_tagname, Event};
use crate::utils::is_lower_hex;
use const_format::formatcp;
use rusqlite::limits::Limit;
use rusqlite::params;
use rusqlite::Connection;

// TODO: drop the pubkey_ref and event_ref tables
use std::cmp::Ordering;
use std::time::Instant;
use tracing::{debug, error, info};

/// Startup DB Pragmas
pub const STARTUP_SQL: &str = r##"
PRAGMA main.synchronous=NORMAL;
PRAGMA foreign_keys = ON;
pragma mmap_size = 536870912; -- 512MB of mmap
PRAGMA journal_size_limit=32768;
pragma mmap_size = 17179869184; -- cap mmap at 16GB
"##;

/// Latest database version
pub const DB_VERSION: usize = 11;

/// Schema definition
const INIT_SQL: &str = r##"
const INIT_SQL: &str = formatcp!(
    r##"
-- Database settings
PRAGMA encoding = "UTF-8";
PRAGMA journal_mode=WAL;
PRAGMA main.synchronous=NORMAL;
PRAGMA foreign_keys = ON;
PRAGMA application_id = 1654008667;
PRAGMA user_version = 5;
PRAGMA user_version = {};

-- Event Table
CREATE TABLE IF NOT EXISTS event (
@@ -33,6 +40,7 @@ event_hash BLOB NOT NULL, -- 4-byte hash
first_seen INTEGER NOT NULL, -- when the event was first seen (not authored!) (seconds since 1970)
created_at INTEGER NOT NULL, -- when the event was authored
author BLOB NOT NULL, -- author pubkey
delegated_by BLOB, -- delegator pubkey (NIP-26)
kind INTEGER NOT NULL, -- event kind
hidden INTEGER, -- relevant for queries
content TEXT NOT NULL -- serialized json of event object
@@ -40,9 +48,10 @@ content TEXT NOT NULL -- serialized json of event object

-- Event Indexes
CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash);
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS author_index ON event(author);
CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);

-- Tag Table
-- Tag values are stored as either a BLOB (if they come in as a
@@ -53,11 +62,13 @@ id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
name TEXT, -- the tag name ("p", "e", whatever)
value TEXT, -- the tag value, if not hex.
value_hex BLOB, -- the tag value, if it can be interpreted as a hex string.
value_hex BLOB, -- the tag value, if it can be interpreted as a lowercase hex string.
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
CREATE INDEX IF NOT EXISTS tag_val_hex_index ON tag(value_hex);
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value_hex,value);
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value_hex);

-- NIP-05 User Validation
CREATE TABLE IF NOT EXISTS user_verification (
@@ -71,19 +82,37 @@ FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CAS
);
CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name);
CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);
"##;
"##,
    DB_VERSION
);

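The formatcp! macro from the const_format crate renders the schema string at compile time, so the {} placeholder above is filled with DB_VERSION without any runtime formatting. A minimal sketch of the same pattern:

use const_format::formatcp;

const VERSION: usize = 11;
// Compile-time interpolation: PRAGMA_SQL is a &'static str, no runtime format call.
const PRAGMA_SQL: &str = formatcp!("PRAGMA user_version = {};", VERSION);
// PRAGMA_SQL == "PRAGMA user_version = 11;"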
/// Determine the current application database schema version.
|
||||
pub fn db_version(conn: &mut Connection) -> Result<usize> {
|
||||
pub fn curr_db_version(conn: &mut Connection) -> Result<usize> {
|
||||
let query = "PRAGMA user_version;";
|
||||
let curr_version = conn.query_row(query, [], |row| row.get(0))?;
|
||||
Ok(curr_version)
|
||||
}
|
||||
|
||||
fn mig_init(conn: &mut PooledConnection) -> Result<usize> {
|
||||
match conn.execute_batch(INIT_SQL) {
|
||||
Ok(()) => {
|
||||
info!(
|
||||
"database pragma/schema initialized to v{}, and ready",
|
||||
DB_VERSION
|
||||
);
|
||||
}
|
||||
Err(err) => {
|
||||
error!("update failed: {}", err);
|
||||
panic!("database could not be initialized");
|
||||
}
|
||||
}
|
||||
Ok(DB_VERSION)
|
||||
}
|
||||
|
||||
/// Upgrade DB to latest version, and execute pragma settings
|
||||
pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
|
||||
// check the version.
|
||||
let mut curr_version = db_version(conn)?;
|
||||
let mut curr_version = curr_db_version(conn)?;
|
||||
info!("DB version = {:?}", curr_version);
|
||||
|
||||
debug!(
|
||||
@@ -99,40 +128,101 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
|
||||
(conn.limit(Limit::SQLITE_LIMIT_SQL_LENGTH) as f64 / (1024 * 1024) as f64).floor()
|
||||
);
|
||||
|
||||
// initialize from scratch
|
||||
if curr_version == 0 {
|
||||
match conn.execute_batch(INIT_SQL) {
|
||||
Ok(()) => {
|
||||
info!("database pragma/schema initialized to v4, and ready");
|
||||
match curr_version.cmp(&DB_VERSION) {
|
||||
// Database is new or not current
|
||||
Ordering::Less => {
|
||||
// initialize from scratch
|
||||
if curr_version == 0 {
|
||||
curr_version = mig_init(conn)?;
|
||||
}
|
||||
Err(err) => {
|
||||
error!("update failed: {}", err);
|
||||
panic!("database could not be initialized");
|
||||
// for initialized but out-of-date schemas, proceed to
|
||||
// upgrade sequentially until we are current.
|
||||
if curr_version == 1 {
|
||||
curr_version = mig_1_to_2(conn)?;
|
||||
}
|
||||
|
||||
if curr_version == 2 {
|
||||
curr_version = mig_2_to_3(conn)?;
|
||||
}
|
||||
|
||||
if curr_version == 3 {
|
||||
curr_version = mig_3_to_4(conn)?;
|
||||
}
|
||||
|
||||
if curr_version == 4 {
|
||||
curr_version = mig_4_to_5(conn)?;
|
||||
}
|
||||
|
||||
if curr_version == 5 {
|
||||
curr_version = mig_5_to_6(conn)?;
|
||||
}
|
||||
if curr_version == 6 {
|
||||
curr_version = mig_6_to_7(conn)?;
|
||||
}
|
||||
if curr_version == 7 {
|
||||
curr_version = mig_7_to_8(conn)?;
|
||||
}
|
||||
if curr_version == 8 {
|
||||
curr_version = mig_8_to_9(conn)?;
|
||||
}
|
||||
if curr_version == 9 {
|
||||
curr_version = mig_9_to_10(conn)?;
|
||||
}
|
||||
if curr_version == 10 {
|
||||
curr_version = mig_10_to_11(conn)?;
|
||||
}
|
||||
|
||||
if curr_version == DB_VERSION {
|
||||
info!(
|
||||
"All migration scripts completed successfully. Welcome to v{}.",
|
||||
DB_VERSION
|
||||
);
|
||||
}
|
||||
}
|
||||
// Database is current, all is good
|
||||
Ordering::Equal => {
|
||||
debug!("Database version was already current (v{})", DB_VERSION);
|
||||
}
|
||||
// Database is newer than what this code understands, abort
|
||||
Ordering::Greater => {
|
||||
panic!(
|
||||
"Database version is newer than supported by this executable (v{} > v{})",
|
||||
curr_version, DB_VERSION
|
||||
);
|
||||
}
|
||||
}
|
||||
if curr_version == 1 {
|
||||
// only change is adding a hidden column to events.
|
||||
let upgrade_sql = r##"
|
||||
|
||||
// Setup PRAGMA
|
||||
conn.execute_batch(STARTUP_SQL)?;
|
||||
debug!("SQLite PRAGMA startup completed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
//// Migration Scripts
|
||||
|
||||
fn mig_1_to_2(conn: &mut PooledConnection) -> Result<usize> {
|
||||
// only change is adding a hidden column to events.
|
||||
let upgrade_sql = r##"
|
||||
ALTER TABLE event ADD hidden INTEGER;
|
||||
UPDATE event SET hidden=FALSE;
|
||||
PRAGMA user_version = 2;
|
||||
"##;
|
||||
match conn.execute_batch(upgrade_sql) {
|
||||
Ok(()) => {
|
||||
info!("database schema upgraded v1 -> v2");
|
||||
curr_version = 2;
|
||||
}
|
||||
Err(err) => {
|
||||
error!("update failed: {}", err);
|
||||
panic!("database could not be upgraded");
|
||||
}
|
||||
match conn.execute_batch(upgrade_sql) {
|
||||
Ok(()) => {
|
||||
info!("database schema upgraded v1 -> v2");
|
||||
}
|
||||
Err(err) => {
|
||||
error!("update failed: {}", err);
|
||||
panic!("database could not be upgraded");
|
||||
}
|
||||
}
|
||||
if curr_version == 2 {
|
||||
// this version lacks the tag column
|
||||
info!("database schema needs update from 2->3");
|
||||
let upgrade_sql = r##"
|
||||
Ok(2)
|
||||
}

fn mig_2_to_3(conn: &mut PooledConnection) -> Result<usize> {
    // this version lacks the tag column
    info!("database schema needs update from 2->3");
    let upgrade_sql = r##"
CREATE TABLE IF NOT EXISTS tag (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
name TEXT, -- the tag name ("e", "p", etc.)
value TEXT, -- the tag value, when stored as text.
value_hex BLOB, -- the tag value, when it can be stored losslessly as hex.
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
PRAGMA user_version = 3;
"##;
    // TODO: load existing refs into tag table
    match conn.execute_batch(upgrade_sql) {
        Ok(()) => {
            info!("database schema upgraded v2 -> v3");
        }
        Err(err) => {
            error!("update failed: {}", err);
            panic!("database could not be upgraded");
        }
    }
    info!("Starting transaction");
    // iterate over every event/pubkey tag
    let tx = conn.transaction()?;
    {
        let mut stmt = tx.prepare("select event_id, \"e\", lower(hex(referenced_event)) from event_ref union select event_id, \"p\", lower(hex(referenced_pubkey)) from pubkey_ref;")?;
        let mut tag_rows = stmt.query([])?;
        while let Some(row) = tag_rows.next()? {
            // we want to capture the event_id that had the tag, the tag name, and the tag hex value.
            let event_id: u64 = row.get(0)?;
            let tag_name: String = row.get(1)?;
            let tag_value: String = row.get(2)?;
            // this will leave behind p/e tags that were non-hex, but they are invalid anyways.
            if is_lower_hex(&tag_value) {
                tx.execute(
                    "INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
                    params![event_id, tag_name, hex::decode(&tag_value).ok()],
                )?;
            }
        }
    }
    info!("Updated tag values");
    tx.commit()?;
    Ok(3)
}

fn mig_3_to_4(conn: &mut PooledConnection) -> Result<usize> {
    info!("database schema needs update from 3->4");
    let upgrade_sql = r##"
-- incoming metadata events with nip05
CREATE TABLE IF NOT EXISTS user_verification (
id INTEGER PRIMARY KEY,
metadata_event INTEGER NOT NULL, -- the metadata event used for this verification
name TEXT NOT NULL -- the nip05 name being verified
-- (further verification-tracking columns are elided in this compare view)
);
CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name);
CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);
PRAGMA user_version = 4;
"##;
    match conn.execute_batch(upgrade_sql) {
        Ok(()) => {
            info!("database schema upgraded v3 -> v4");
        }
        Err(err) => {
            error!("update failed: {}", err);
            panic!("database could not be upgraded");
        }
    }
    Ok(4)
}

fn mig_4_to_5(conn: &mut PooledConnection) -> Result<usize> {
    info!("database schema needs update from 4->5");
    let upgrade_sql = r##"
DROP TABLE IF EXISTS event_ref;
DROP TABLE IF EXISTS pubkey_ref;
PRAGMA user_version=5;
"##;
    match conn.execute_batch(upgrade_sql) {
        Ok(()) => {
            info!("database schema upgraded v4 -> v5");
        }
        Err(err) => {
            error!("update failed: {}", err);
            panic!("database could not be upgraded");
        }
    }
    Ok(5)
}

fn mig_5_to_6(conn: &mut PooledConnection) -> Result<usize> {
    info!("database schema needs update from 5->6");
    // We need to rebuild the tags table.  iterate through the
    // event table.  build event from json, insert tags into a
    // fresh tag table.  This was needed due to a logic error in
    // how hex-like tags got indexed.
    let start = Instant::now();
    let tx = conn.transaction()?;
    {
        // Clear out table
        tx.execute("DELETE FROM tag;", [])?;
        let mut stmt = tx.prepare("select id, content from event order by id;")?;
        let mut tag_rows = stmt.query([])?;
        while let Some(row) = tag_rows.next()? {
            let event_id: u64 = row.get(0)?;
            let event_json: String = row.get(1)?;
            let event: Event = serde_json::from_str(&event_json)?;
            // look at each event, and each tag, creating new tag entries if appropriate.
            for t in event.tags.iter().filter(|x| x.len() > 1) {
                let tagname = t.get(0).unwrap();
                let tagnamechar_opt = single_char_tagname(tagname);
                if tagnamechar_opt.is_none() {
                    continue;
                }
                // safe because len was > 1
                let tagval = t.get(1).unwrap();
                // insert as BLOB if we can restore it losslessly.
                // this means it needs to be even length and lowercase.
                if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
                    tx.execute(
                        "INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
                        params![event_id, tagname, hex::decode(tagval).ok()],
                    )?;
                } else {
                    // otherwise, insert as text
                    tx.execute(
                        "INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
                        params![event_id, tagname, &tagval],
                    )?;
                }
            }
        }
        tx.execute("PRAGMA user_version = 6;", [])?;
    }
    tx.commit()?;
    info!("database schema upgraded v5 -> v6 in {:?}", start.elapsed());
    // vacuum after large table modification
    let start = Instant::now();
    conn.execute("VACUUM;", [])?;
    info!("vacuumed DB after tags rebuild in {:?}", start.elapsed());
    Ok(6)
}
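The BLOB-vs-TEXT rule above is about lossless round-trips: only even-length, lower-case hex decodes and re-encodes to the identical string. A small sketch of that property, using the hex crate and the is_lower_hex predicate already in use here:

// Even-length lower-case hex survives decode/encode unchanged,
// so it is safe to store as a BLOB and restore later.
fn roundtrips(tagval: &str) -> bool {
    if tagval.len() % 2 == 0 && is_lower_hex(tagval) {
        if let Ok(bytes) = hex::decode(tagval) {
            return hex::encode(bytes) == tagval;
        }
    }
    false
}

// roundtrips("abcd01") == true
// roundtrips("ABCD01") == false (hex::encode would produce "abcd01")
// roundtrips("abc")    == false (odd length cannot round-trip)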

fn mig_6_to_7(conn: &mut PooledConnection) -> Result<usize> {
    info!("database schema needs update from 6->7");
    // adds a delegated_by column to events, plus an index for it.
    let upgrade_sql = r##"
ALTER TABLE event ADD delegated_by BLOB;
CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
PRAGMA user_version = 7;
"##;
    match conn.execute_batch(upgrade_sql) {
        Ok(()) => {
            info!("database schema upgraded v6 -> v7");
        }
        Err(err) => {
            error!("update failed: {}", err);
            panic!("database could not be upgraded");
        }
    }
    Ok(7)
}

fn mig_7_to_8(conn: &mut PooledConnection) -> Result<usize> {
    info!("database schema needs update from 7->8");
    // Remove redundant indexes, and add a better multi-column index.
    let upgrade_sql = r##"
DROP INDEX IF EXISTS created_at_index;
DROP INDEX IF EXISTS kind_index;
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
PRAGMA user_version = 8;
"##;
    match conn.execute_batch(upgrade_sql) {
        Ok(()) => {
            info!("database schema upgraded v7 -> v8");
        }
        Err(err) => {
            error!("update failed: {}", err);
            panic!("database could not be upgraded");
        }
    }
    Ok(8)
}

fn mig_8_to_9(conn: &mut PooledConnection) -> Result<usize> {
    info!("database schema needs update from 8->9");
    // Those old indexes were actually helpful...
    let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
PRAGMA user_version = 9;
"##;
    match conn.execute_batch(upgrade_sql) {
        Ok(()) => {
            info!("database schema upgraded v8 -> v9");
        }
        Err(err) => {
            error!("update failed: {}", err);
            panic!("database could not be upgraded");
        }
    }
    Ok(9)
}

fn mig_9_to_10(conn: &mut PooledConnection) -> Result<usize> {
    info!("database schema needs update from 9->10");
    // add a composite index on the tag table to speed lookups.
    let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value_hex,value);
PRAGMA user_version = 10;
"##;
    match conn.execute_batch(upgrade_sql) {
        Ok(()) => {
            info!("database schema upgraded v9 -> v10");
        }
        Err(err) => {
            error!("update failed: {}", err);
            panic!("database could not be upgraded");
        }
    }
    Ok(10)
}

fn mig_10_to_11(conn: &mut PooledConnection) -> Result<usize> {
    info!("database schema needs update from 10->11");
    // add a tag index keyed on name, then reindex and re-optimize.
    let upgrade_sql = r##"
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value_hex);
reindex;
pragma optimize;
PRAGMA user_version = 11;
"##;
    match conn.execute_batch(upgrade_sql) {
        Ok(()) => {
            info!("database schema upgraded v10 -> v11");
        }
        Err(err) => {
            error!("update failed: {}", err);
            panic!("database could not be upgraded");
        }
    }
    Ok(11)
}
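Every migration ends by bumping PRAGMA user_version, which is what the version checks key off. A quick sketch of reading it back with rusqlite (the connection setup is assumed):

// Read the schema version the last migration wrote.
fn schema_version(conn: &rusqlite::Connection) -> rusqlite::Result<i64> {
    conn.query_row("PRAGMA user_version;", [], |row| row.get(0))
}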

src/server.rs (new file, 738 lines)

//! Server process
use crate::close::Close;
use crate::close::CloseCmd;
use crate::config::{Settings, VerifiedUsersMode};
use crate::conn;
use crate::db;
use crate::db::SubmittedEvent;
use crate::error::{Error, Result};
use crate::event::Event;
use crate::event::EventCmd;
use crate::info::RelayInfo;
use crate::nip05;
use crate::notice::Notice;
use crate::subscription::Subscription;
use futures::SinkExt;
use futures::StreamExt;
use governor::{Jitter, Quota, RateLimiter};
use http::header::HeaderMap;
use hyper::header::ACCEPT;
use hyper::service::{make_service_fn, service_fn};
use hyper::upgrade::Upgraded;
use hyper::{
    header, server::conn::AddrStream, upgrade, Body, Request, Response, Server, StatusCode,
};
use rusqlite::OpenFlags;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::collections::HashMap;
use std::convert::Infallible;
use std::net::SocketAddr;
use std::path::Path;
use std::sync::atomic::Ordering;
use std::sync::mpsc::Receiver as MpscReceiver;
use std::time::Duration;
use std::time::Instant;
use tokio::runtime::Builder;
use tokio::sync::broadcast::{self, Receiver, Sender};
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio_tungstenite::WebSocketStream;
use tracing::*;
use tungstenite::error::CapacityError::MessageTooLong;
use tungstenite::error::Error as WsError;
use tungstenite::handshake;
use tungstenite::protocol::Message;
use tungstenite::protocol::WebSocketConfig;

/// Handle arbitrary HTTP requests, including for WebSocket upgrades.
async fn handle_web_request(
    mut request: Request<Body>,
    pool: db::SqlitePool,
    settings: Settings,
    remote_addr: SocketAddr,
    broadcast: Sender<Event>,
    event_tx: tokio::sync::mpsc::Sender<SubmittedEvent>,
    shutdown: Receiver<()>,
) -> Result<Response<Body>, Infallible> {
    match (
        request.uri().path(),
        request.headers().contains_key(header::UPGRADE),
    ) {
        // Request for / as websocket
        ("/", true) => {
            trace!("websocket with upgrade request");
            //assume request is a handshake, so create the handshake response
            let response = match handshake::server::create_response_with_body(&request, || {
                Body::empty()
            }) {
                Ok(response) => {
                    //in case the handshake response creation succeeds,
                    //spawn a task to handle the websocket connection
                    tokio::spawn(async move {
                        //using the hyper feature of upgrading a connection
                        match upgrade::on(&mut request).await {
                            //if successfully upgraded
                            Ok(upgraded) => {
                                // set WebSocket configuration options
                                let config = WebSocketConfig {
                                    max_message_size: settings.limits.max_ws_message_bytes,
                                    max_frame_size: settings.limits.max_ws_frame_bytes,
                                    ..Default::default()
                                };
                                //create a websocket stream from the upgraded object
                                let ws_stream = WebSocketStream::from_raw_socket(
                                    //pass the upgraded object
                                    //as the base layer stream of the Websocket
                                    upgraded,
                                    tokio_tungstenite::tungstenite::protocol::Role::Server,
                                    Some(config),
                                )
                                .await;
                                let origin = get_header_string("origin", request.headers());
                                let user_agent = get_header_string("user-agent", request.headers());
                                // determine the remote IP from headers if they exist
                                let header_ip = settings
                                    .network
                                    .remote_ip_header
                                    .as_ref()
                                    .and_then(|x| get_header_string(x, request.headers()));
                                // use the socket addr as a backup
                                let remote_ip =
                                    header_ip.unwrap_or_else(|| remote_addr.ip().to_string());
                                let client_info = ClientInfo {
                                    remote_ip,
                                    user_agent,
                                    origin,
                                };
                                // spawn a nostr server with our websocket
                                tokio::spawn(nostr_server(
                                    pool,
                                    client_info,
                                    settings,
                                    ws_stream,
                                    broadcast,
                                    event_tx,
                                    shutdown,
                                ));
                            }
                            // todo: trace, don't print...
                            Err(e) => println!(
                                "error when trying to upgrade connection \
                                 from address {} to websocket connection. \
                                 Error is: {}",
                                remote_addr, e
                            ),
                        }
                    });
                    //return the response to the handshake request
                    response
                }
                Err(error) => {
                    warn!("websocket response failed");
                    let mut res =
                        Response::new(Body::from(format!("Failed to create websocket: {}", error)));
                    *res.status_mut() = StatusCode::BAD_REQUEST;
                    return Ok(res);
                }
            };
            Ok::<_, Infallible>(response)
        }
        // Request for Relay info
        ("/", false) => {
            // handle request at root with no upgrade header
            // Check if this is a nostr server info request
            let accept_header = &request.headers().get(ACCEPT);
            // check if application/nostr+json is included
            if let Some(media_types) = accept_header {
                if let Ok(mt_str) = media_types.to_str() {
                    if mt_str.contains("application/nostr+json") {
                        // build a relay info response
                        debug!("Responding to server info request");
                        let rinfo = RelayInfo::from(settings.info);
                        let b = Body::from(serde_json::to_string_pretty(&rinfo).unwrap());
                        return Ok(Response::builder()
                            .status(200)
                            .header("Content-Type", "application/nostr+json")
                            .header("Access-Control-Allow-Origin", "*")
                            .body(b)
                            .unwrap());
                    }
                }
            }
            Ok(Response::builder()
                .status(200)
                .header("Content-Type", "text/plain")
                .body(Body::from("Please use a Nostr client to connect."))
                .unwrap())
        }
        (_, _) => {
            //handle any other url
            Ok(Response::builder()
                .status(StatusCode::NOT_FOUND)
                .body(Body::from("Nothing here."))
                .unwrap())
        }
    }
}
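For the application/nostr+json branch above, a client can request the relay info document explicitly. A hedged sketch with the hyper client; the listening address is a placeholder for wherever the relay runs:

// Sketch: fetch the NIP-11 relay info document served by the handler above.
async fn fetch_relay_info() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let client = hyper::Client::new();
    let req = hyper::Request::builder()
        .uri("http://127.0.0.1:8080/")
        .header("Accept", "application/nostr+json")
        .body(hyper::Body::empty())?;
    let resp = client.request(req).await?;
    let body = hyper::body::to_bytes(resp.into_body()).await?;
    println!("{}", String::from_utf8_lossy(&body));
    Ok(())
}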

fn get_header_string(header: &str, headers: &HeaderMap) -> Option<String> {
    headers
        .get(header)
        .and_then(|x| x.to_str().ok().map(|x| x.to_string()))
}

// return on a control-c or internally requested shutdown signal
async fn ctrl_c_or_signal(mut shutdown_signal: Receiver<()>) {
    let mut term_signal = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
        .expect("could not define signal");
    loop {
        tokio::select! {
            _ = shutdown_signal.recv() => {
                info!("Shutting down webserver as requested");
                // server shutting down, exit loop
                break;
            },
            _ = tokio::signal::ctrl_c() => {
                info!("Shutting down webserver due to SIGINT");
                break;
            },
            _ = term_signal.recv() => {
                info!("Shutting down webserver due to SIGTERM");
                break;
            },
        }
    }
}

/// Start running a Nostr relay server.
pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result<(), Error> {
    trace!("Config: {:?}", settings);
    // do some config validation.
    if !Path::new(&settings.database.data_directory).is_dir() {
        error!("Database directory does not exist");
        return Err(Error::DatabaseDirError);
    }
    let addr = format!(
        "{}:{}",
        settings.network.address.trim(),
        settings.network.port
    );
    let socket_addr = addr.parse().expect("listening address not valid");
    // address whitelisting settings
    if let Some(addr_whitelist) = &settings.authorization.pubkey_whitelist {
        info!(
            "Event publishing restricted to {} pubkey(s)",
            addr_whitelist.len()
        );
    }
    // check if NIP-05 enforced user verification is on
    if settings.verified_users.is_active() {
        info!(
            "NIP-05 user verification mode:{:?}",
            settings.verified_users.mode
        );
        if let Some(d) = settings.verified_users.verify_update_duration() {
            info!("NIP-05 check user verification every: {:?}", d);
        }
        if let Some(d) = settings.verified_users.verify_expiration_duration() {
            info!("NIP-05 user verification expires after: {:?}", d);
        }
        if let Some(wl) = &settings.verified_users.domain_whitelist {
            info!("NIP-05 domain whitelist: {:?}", wl);
        }
        if let Some(bl) = &settings.verified_users.domain_blacklist {
            info!("NIP-05 domain blacklist: {:?}", bl);
        }
    }
    // configure tokio runtime
    let rt = Builder::new_multi_thread()
        .enable_all()
        .thread_name_fn(|| {
            // give each thread a unique numeric name
            static ATOMIC_ID: std::sync::atomic::AtomicUsize = std::sync::atomic::AtomicUsize::new(0);
            let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst);
            format!("tokio-ws-{}", id)
        })
        // limit concurrent SQLite blocking threads
        .max_blocking_threads(settings.limits.max_blocking_threads)
        .on_thread_start(|| {
            trace!("started new thread: {:?}", std::thread::current().name());
        })
        .on_thread_stop(|| {
            trace!("stopped thread: {:?}", std::thread::current().name());
        })
        .build()
        .unwrap();
    // start tokio
    rt.block_on(async {
        let broadcast_buffer_limit = settings.limits.broadcast_buffer;
        let persist_buffer_limit = settings.limits.event_persist_buffer;
        let verified_users_active = settings.verified_users.is_active();
        let db_min_conn = settings.database.min_conn;
        let db_max_conn = settings.database.max_conn;
        let settings = settings.clone();
        info!("listening on: {}", socket_addr);
        // all client-submitted valid events are broadcast to every
        // other client on this channel.  This should be large enough
        // to accommodate slower readers (messages are dropped if
        // clients can not keep up).
        let (bcast_tx, _) = broadcast::channel::<Event>(broadcast_buffer_limit);
        // validated events that need to be persisted are sent to the
        // database via this channel.
        let (event_tx, event_rx) = mpsc::channel::<SubmittedEvent>(persist_buffer_limit);
        // establish a channel for letting all threads know about a
        // requested server shutdown.
        let (invoke_shutdown, shutdown_listen) = broadcast::channel::<()>(1);
        // create a channel for sending any new metadata event.  These
        // will get processed relatively slowly (a potentially
        // multi-second blocking HTTP call) on a single thread, so we
        // buffer requests on the channel.  No harm in dropping events
        // here, since we are protecting against DoS.  This can make
        // it difficult to set up initial metadata in bulk, since
        // overwhelming this will drop events and won't register
        // metadata events.
        let (metadata_tx, metadata_rx) = broadcast::channel::<Event>(4096);
        // start the database writer thread.  Give it a channel for
        // writing events, and for publishing events that have been
        // written (to all connected clients).
        db::db_writer(
            settings.clone(),
            event_rx,
            bcast_tx.clone(),
            metadata_tx.clone(),
            shutdown_listen,
        )
        .await;
        info!("db writer created");

        // create a nip-05 verifier thread; if enabled.
        if settings.verified_users.mode != VerifiedUsersMode::Disabled {
            let verifier_opt =
                nip05::Verifier::new(metadata_rx, bcast_tx.clone(), settings.clone());
            if let Ok(mut v) = verifier_opt {
                if verified_users_active {
                    tokio::task::spawn(async move {
                        info!("starting up NIP-05 verifier...");
                        v.run().await;
                    });
                }
            }
        }
        // build a connection pool for DB maintenance
        let maintenance_pool = db::build_pool(
            "maintenance writer",
            &settings,
            OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
            1,
            2,
            false,
        );
        db::db_optimize(maintenance_pool.clone()).await;
        db::db_checkpoint(maintenance_pool).await;

        // listen for (external to tokio) shutdown request
        let controlled_shutdown = invoke_shutdown.clone();
        tokio::spawn(async move {
            info!("control message listener started");
            match shutdown_rx.recv() {
                Ok(()) => {
                    info!("control message requesting shutdown");
                    controlled_shutdown.send(()).ok();
                }
                Err(std::sync::mpsc::RecvError) => {
                    // FIXME: spurious error on startup?
                    debug!("shutdown requestor is disconnected");
                }
            };
        });
        // listen for ctrl-c interrupts
        let ctrl_c_shutdown = invoke_shutdown.clone();
        // listener for webserver shutdown
        let webserver_shutdown_listen = invoke_shutdown.subscribe();

        tokio::spawn(async move {
            tokio::signal::ctrl_c().await.unwrap();
            info!("shutting down due to SIGINT (main)");
            ctrl_c_shutdown.send(()).ok();
        });
        // build a connection pool for sqlite connections
        let pool = db::build_pool(
            "client query",
            &settings,
            rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY,
            db_min_conn,
            db_max_conn,
            true,
        );
        // spawn a task to check the pool size.
        let pool_monitor = pool.clone();
        tokio::spawn(async move { db::monitor_pool("reader", pool_monitor).await; });

        // A `Service` is needed for every connection, so this
        // creates one from our `handle_request` function.
        let make_svc = make_service_fn(|conn: &AddrStream| {
            let svc_pool = pool.clone();
            let remote_addr = conn.remote_addr();
            let bcast = bcast_tx.clone();
            let event = event_tx.clone();
            let stop = invoke_shutdown.clone();
            let settings = settings.clone();
            async move {
                // service_fn converts our function into a `Service`
                Ok::<_, Infallible>(service_fn(move |request: Request<Body>| {
                    handle_web_request(
                        request,
                        svc_pool.clone(),
                        settings.clone(),
                        remote_addr,
                        bcast.clone(),
                        event.clone(),
                        stop.subscribe(),
                    )
                }))
            }
        });
        let server = Server::bind(&socket_addr)
            .serve(make_svc)
            .with_graceful_shutdown(ctrl_c_or_signal(webserver_shutdown_listen));
        // run hyper in this thread.  This is why the thread does not return.
        if let Err(e) = server.await {
            eprintln!("server error: {}", e);
        }
    });
    Ok(())
}
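A sketch of how a binary might drive start_server; the settings source here is an assumption, but the shutdown handshake matches the mpsc receiver the function takes (the integration tests later in this compare use the same pattern):

// Sketch: keep the sender around; sending () asks the server to stop.
fn run() -> Result<(), Error> {
    let settings = Settings::default(); // assumption: however config is actually loaded
    let (_shutdown_tx, shutdown_rx) = std::sync::mpsc::channel::<()>();
    start_server(settings, shutdown_rx)
}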

/// Nostr protocol messages from a client
#[derive(Deserialize, Serialize, Clone, PartialEq, Eq, Debug)]
#[serde(untagged)]
pub enum NostrMessage {
    /// An `EVENT` message
    EventMsg(EventCmd),
    /// A `REQ` message
    SubMsg(Subscription),
    /// A `CLOSE` message
    CloseMsg(CloseCmd),
}

/// Convert Message to NostrMessage
fn convert_to_msg(msg: String, max_bytes: Option<usize>) -> Result<NostrMessage> {
    let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
    match parsed_res {
        Ok(m) => {
            if let NostrMessage::EventMsg(_) = m {
                if let Some(max_size) = max_bytes {
                    // check length, ensure that some max size is set.
                    if msg.len() > max_size && max_size > 0 {
                        return Err(Error::EventMaxLengthError(msg.len()));
                    }
                }
            }
            Ok(m)
        }
        Err(e) => {
            debug!("proto parse error: {:?}", e);
            debug!("parse error on message: {}", msg.trim());
            Err(Error::ProtoParseError)
        }
    }
}
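For reference, the three inbound frame shapes the untagged NostrMessage enum accepts look like this (values are illustrative):

["EVENT", {"id": "...", "pubkey": "...", "created_at": 0, "kind": 1, "tags": [], "content": "hi", "sig": "..."}]
["REQ", "sub-id", {"kinds": [1], "limit": 10}]
["CLOSE", "sub-id"]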

/// Turn a string into a NOTICE message ready to send over a WebSocket
fn make_notice_message(notice: Notice) -> Message {
    let json = match notice {
        Notice::Message(ref msg) => json!(["NOTICE", msg]),
        Notice::EventResult(ref res) => json!(["OK", res.id, res.status.to_bool(), res.msg]),
    };

    Message::text(json.to_string())
}
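And the two outbound shapes this produces, straight from the json! calls above (the id value is illustrative):

["NOTICE", "binary messages are not accepted"]
["OK", "<event-id>", true, ""]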

struct ClientInfo {
    remote_ip: String,
    user_agent: Option<String>,
    origin: Option<String>,
}

/// Handle new client connections.  This runs through an event loop
/// for all client communication.
async fn nostr_server(
    pool: db::SqlitePool,
    client_info: ClientInfo,
    settings: Settings,
    mut ws_stream: WebSocketStream<Upgraded>,
    broadcast: Sender<Event>,
    event_tx: mpsc::Sender<SubmittedEvent>,
    mut shutdown: Receiver<()>,
) {
    // the time this websocket nostr server started
    let orig_start = Instant::now();
    // get a broadcast channel for clients to communicate on
    let mut bcast_rx = broadcast.subscribe();
    // Track internal client state
    let mut conn = conn::ClientConn::new(client_info.remote_ip);
    // subscription creation rate limiting
    let mut sub_lim_opt = None;
    // 100ms jitter when the rate limiter returns
    let jitter = Jitter::up_to(Duration::from_millis(100));
    let sub_per_min_setting = settings.limits.subscriptions_per_min;
    if let Some(sub_per_min) = sub_per_min_setting {
        if sub_per_min > 0 {
            trace!("Rate limits for sub creation ({}/min)", sub_per_min);
            let quota_time = core::num::NonZeroU32::new(sub_per_min).unwrap();
            let quota = Quota::per_minute(quota_time);
            sub_lim_opt = Some(RateLimiter::direct(quota));
        }
    }
    // Use the remote IP as the client identifier
    let cid = conn.get_client_prefix();
    // Create a channel for receiving query results from the database.
    // we will send out the tx handle to any query we generate.
    // this has capacity for some of the larger requests we see, which
    // should allow the DB thread to release the handle earlier.
    let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(20000);
    // Create channel for receiving NOTICEs
    let (notice_tx, mut notice_rx) = mpsc::channel::<Notice>(128);

    // last time this client sent data (message, ping, etc.)
    let mut last_message_time = Instant::now();

    // ping interval (every 5 minutes)
    let default_ping_dur = Duration::from_secs(settings.network.ping_interval_seconds.into());

    // disconnect after 20 minutes without a ping response or event.
    let max_quiet_time = Duration::from_secs(60 * 20);

    let start = tokio::time::Instant::now() + default_ping_dur;
    let mut ping_interval = tokio::time::interval_at(start, default_ping_dur);

    // maintain a hashmap of oneshot channels for active subscriptions.
    // when these subscriptions are cancelled, make a message
    // available to the executing query so it knows to stop.
    let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
    // for stats, keep track of how many events the client published,
    // and how many it received from queries.
    let mut client_published_event_count: usize = 0;
    let mut client_received_event_count: usize = 0;
    debug!("new client connection (cid: {}, ip: {:?})", cid, conn.ip());
    let origin = client_info.origin.unwrap_or_else(|| "<unspecified>".into());
    let user_agent = client_info
        .user_agent
        .unwrap_or_else(|| "<unspecified>".into());
    debug!(
        "cid: {}, origin: {:?}, user-agent: {:?}",
        cid, origin, user_agent
    );
    loop {
        tokio::select! {
            _ = shutdown.recv() => {
                info!("Closing connection due to shutdown, client: {}, ip: {:?}, connected: {:?}", cid, conn.ip(), orig_start.elapsed());
                // server shutting down, exit loop
                break;
            },
            _ = ping_interval.tick() => {
                // check how long since we talked to client
                // if it has been too long, disconnect
                if last_message_time.elapsed() > max_quiet_time {
                    debug!("ending connection due to lack of client ping response");
                    break;
                }
                // Send a ping
                ws_stream.send(Message::Ping(Vec::new())).await.ok();
            },
            Some(notice_msg) = notice_rx.recv() => {
                ws_stream.send(make_notice_message(notice_msg)).await.ok();
            },
            Some(query_result) = query_rx.recv() => {
                // database informed us of a query result we asked for
                let subesc = query_result.sub_id.replace('"', "");
                if query_result.event == "EOSE" {
                    let send_str = format!("[\"EOSE\",\"{}\"]", subesc);
                    ws_stream.send(Message::Text(send_str)).await.ok();
                } else {
                    client_received_event_count += 1;
                    // send a result
                    let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
                    ws_stream.send(Message::Text(send_str)).await.ok();
                }
            },
            // TODO: consider logging the LaggedRecv error
            Ok(global_event) = bcast_rx.recv() => {
                // an event has been broadcast to all clients
                // first check if there is a subscription for this event.
                for (s, sub) in conn.subscriptions() {
                    if !sub.interested_in_event(&global_event) {
                        continue;
                    }

                    // TODO: serialize at broadcast time, instead of
                    // once for each consumer.
                    if let Ok(event_str) = serde_json::to_string(&global_event) {
                        trace!("sub match for client: {}, sub: {:?}, event: {:?}",
                               cid, s,
                               global_event.get_event_id_prefix());
                        // create an event response and send it
                        let subesc = s.replace('"', "");
                        ws_stream.send(Message::Text(format!("[\"EVENT\",\"{}\",{}]", subesc, event_str))).await.ok();
                    } else {
                        warn!("could not serialize event: {:?}", global_event.get_event_id_prefix());
                    }
                }
            },
            ws_next = ws_stream.next() => {
                // update most recent message time for client
                last_message_time = Instant::now();
                // Consume text messages from the client, parse into Nostr messages.
                let nostr_msg = match ws_next {
                    Some(Ok(Message::Text(m))) => {
                        convert_to_msg(m, settings.limits.max_event_bytes)
                    },
                    Some(Ok(Message::Binary(_))) => {
                        ws_stream.send(
                            make_notice_message(Notice::message("binary messages are not accepted".into()))).await.ok();
                        continue;
                    },
                    Some(Ok(Message::Ping(_) | Message::Pong(_))) => {
                        // get a ping/pong, ignore.  tungstenite will
                        // send responses automatically.
                        continue;
                    },
                    Some(Err(WsError::Capacity(MessageTooLong{size, max_size}))) => {
                        ws_stream.send(
                            make_notice_message(Notice::message(format!("message too large ({} > {})", size, max_size)))).await.ok();
                        continue;
                    },
                    None |
                    Some(Ok(Message::Close(_)) |
                         Err(WsError::AlreadyClosed | WsError::ConnectionClosed |
                             WsError::Protocol(tungstenite::error::ProtocolError::ResetWithoutClosingHandshake)))
                        => {
                            debug!("websocket close from client (cid: {}, ip: {:?})", cid, conn.ip());
                            break;
                        },
                    Some(Err(WsError::Io(e))) => {
                        // IO errors are considered fatal
                        warn!("IO error (cid: {}, ip: {:?}): {:?}", cid, conn.ip(), e);
                        break;
                    }
                    x => {
                        // default condition on error is to close the client connection
                        info!("unknown error (cid: {}, ip: {:?}): {:?} (closing conn)", cid, conn.ip(), x);
                        break;
                    }
                };

                // convert ws_next into proto_next
                match nostr_msg {
                    Ok(NostrMessage::EventMsg(ec)) => {
                        // An EventCmd needs to be validated to be converted into an Event
                        // handle each type of message
                        let evid = ec.event_id().to_owned();
                        let parsed : Result<Event> = Result::<Event>::from(ec);
                        match parsed {
                            Ok(e) => {
                                let id_prefix: String = e.id.chars().take(8).collect();
                                debug!("successfully parsed/validated event: {:?} (cid: {})", id_prefix, cid);
                                // check if the event is too far in the future.
                                if e.is_valid_timestamp(settings.options.reject_future_seconds) {
                                    // Write this to the database.
                                    let submit_event = SubmittedEvent { event: e.clone(), notice_tx: notice_tx.clone() };
                                    event_tx.send(submit_event).await.ok();
                                    client_published_event_count += 1;
                                } else {
                                    info!("client: {} sent a far future-dated event", cid);
                                    if let Some(fut_sec) = settings.options.reject_future_seconds {
                                        let msg = format!("The event created_at field is out of the acceptable range (+{}sec) for this relay.", fut_sec);
                                        let notice = Notice::invalid(e.id, &msg);
                                        ws_stream.send(make_notice_message(notice)).await.ok();
                                    }
                                }
                            },
                            Err(e) => {
                                info!("client sent an invalid event (cid: {})", cid);
                                ws_stream.send(make_notice_message(Notice::invalid(evid, &format!("{}", e)))).await.ok();
                            }
                        }
                    },
                    Ok(NostrMessage::SubMsg(s)) => {
                        debug!("subscription requested (cid: {}, sub: {:?})", cid, s.id);
                        // subscription handling consists of:
                        // * checking for rate limits
                        // * registering the subscription so future events can be matched
                        // * making a channel to cancel the request later
                        // * sending a request for a SQL query
                        // Do nothing if the sub already exists.
                        if !conn.has_subscription(&s) {
                            if let Some(ref lim) = sub_lim_opt {
                                lim.until_ready_with_jitter(jitter).await;
                            }
                            let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
                            match conn.subscribe(s.clone()) {
                                Ok(()) => {
                                    // when we insert, if there was a previous query running with the same name, cancel it.
                                    if let Some(previous_query) = running_queries.insert(s.id.to_owned(), abandon_query_tx) {
                                        previous_query.send(()).ok();
                                    }
                                    // start a database query.  this spawns a blocking database query on a worker thread.
                                    db::db_query(s, cid.to_owned(), pool.clone(), query_tx.clone(), abandon_query_rx).await;
                                },
                                Err(e) => {
                                    info!("Subscription error: {} (cid: {}, sub: {:?})", e, cid, s.id);
                                    ws_stream.send(make_notice_message(Notice::message(format!("Subscription error: {}", e)))).await.ok();
                                }
                            }
                        } else {
                            info!("client sent duplicate subscription, ignoring (cid: {}, sub: {:?})", cid, s.id);
                        }
                    },
                    Ok(NostrMessage::CloseMsg(cc)) => {
                        // closing a request simply removes the subscription.
                        let parsed : Result<Close> = Result::<Close>::from(cc);
                        if let Ok(c) = parsed {
                            // check if a query is currently
                            // running, and remove it if so.
                            let stop_tx = running_queries.remove(&c.id);
                            if let Some(tx) = stop_tx {
                                tx.send(()).ok();
                            }
                            // stop checking new events against
                            // the subscription
                            conn.unsubscribe(&c);
                        } else {
                            info!("invalid command ignored");
                            ws_stream.send(make_notice_message(Notice::message("could not parse command".into()))).await.ok();
                        }
                    },
                    Err(Error::ConnError) => {
                        debug!("got connection close/error, disconnecting cid: {}, ip: {:?}", cid, conn.ip());
                        break;
                    }
                    Err(Error::EventMaxLengthError(s)) => {
                        info!("client sent event larger ({} bytes) than max size (cid: {})", s, cid);
                        ws_stream.send(make_notice_message(Notice::message("event exceeded max size".into()))).await.ok();
                    },
                    Err(Error::ProtoParseError) => {
                        info!("client sent event that could not be parsed (cid: {})", cid);
                        ws_stream.send(make_notice_message(Notice::message("could not parse command".into()))).await.ok();
                    },
                    Err(e) => {
                        info!("got non-fatal error from client (cid: {}, error: {:?})", cid, e);
                    },
                }
            },
        }
    }
    // connection cleanup - ensure any still running queries are terminated.
    for (_, stop_tx) in running_queries {
        stop_tx.send(()).ok();
    }
    info!(
        "stopping client connection (cid: {}, ip: {:?}, sent: {} events, recv: {} events, connected: {:?})",
        cid,
        conn.ip(),
        client_published_event_count,
        client_received_event_count,
        orig_start.elapsed()
    );
}
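The subscription limiter above is a keyless ("direct") governor limiter. A self-contained sketch of the same pattern, with an assumed limit of 10 per minute:

use governor::{Jitter, Quota, RateLimiter};
use std::num::NonZeroU32;
use std::time::Duration;

// Admit up to 10 subscription creations per minute; waiting callers
// get a little jitter so they don't all wake in lockstep.
async fn limited_subscribe_loop() {
    let quota = Quota::per_minute(NonZeroU32::new(10).unwrap());
    let limiter = RateLimiter::direct(quota);
    let jitter = Jitter::up_to(Duration::from_millis(100));
    for _ in 0..3 {
        limiter.until_ready_with_jitter(jitter).await;
        // ...create a subscription here...
    }
}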

src/subscription.rs
@@ -8,7 +8,7 @@ use std::collections::HashMap;
 use std::collections::HashSet;
 
 /// Subscription identifier and set of request filters
-#[derive(Serialize, PartialEq, Debug, Clone)]
+#[derive(Serialize, PartialEq, Eq, Debug, Clone)]
 pub struct Subscription {
     pub id: String,
     pub filters: Vec<ReqFilter>,
@@ -19,7 +19,7 @@ pub struct Subscription {
 /// Corresponds to client-provided subscription request elements. Any
 /// element can be present if it should be used in filtering, or
 /// absent ([`None`]) if it should be ignored.
-#[derive(Serialize, PartialEq, Debug, Clone)]
+#[derive(Serialize, PartialEq, Eq, Debug, Clone)]
 pub struct ReqFilter {
     /// Event hashes
     pub ids: Option<Vec<String>>,
@@ -32,10 +32,15 @@ pub struct ReqFilter {
     /// List of author public keys
     pub authors: Option<Vec<String>>,
     /// Limit number of results
-    pub limit: Option<u32>,
+    pub limit: Option<u64>,
     /// Set of tags
     #[serde(skip)]
-    pub tags: Option<HashMap<String, HashSet<String>>>,
+    pub tags: Option<HashMap<char, HashSet<String>>>,
+    /// Force no matches due to malformed data
+    // we can't represent it in the req filter, so we don't want to
+    // erroneously match.  This basically indicates the req tried to
+    // do something invalid.
+    pub force_no_match: bool,
 }
 
 impl<'de> Deserialize<'de> for ReqFilter {
@@ -58,13 +63,23 @@ impl<'de> Deserialize<'de> for ReqFilter {
             authors: None,
             limit: None,
             tags: None,
+            force_no_match: false,
         };
+        let empty_string = "".into();
         let mut ts = None;
         // iterate through each key, and assign values that exist
         for (key, val) in filter.into_iter() {
             // ids
             if key == "ids" {
-                rf.ids = Deserialize::deserialize(val).ok();
+                let raw_ids: Option<Vec<String>> = Deserialize::deserialize(val).ok();
+                if let Some(a) = raw_ids.as_ref() {
+                    if a.contains(&empty_string) {
+                        return Err(serde::de::Error::invalid_type(
+                            Unexpected::Other("prefix matches must not be empty strings"),
+                            &"a json object"));
+                    }
+                }
+                rf.ids = raw_ids;
             } else if key == "kinds" {
                 rf.kinds = Deserialize::deserialize(val).ok();
             } else if key == "since" {
@@ -74,21 +89,33 @@ impl<'de> Deserialize<'de> for ReqFilter {
             } else if key == "limit" {
                 rf.limit = Deserialize::deserialize(val).ok();
             } else if key == "authors" {
-                rf.authors = Deserialize::deserialize(val).ok();
+                let raw_authors: Option<Vec<String>> = Deserialize::deserialize(val).ok();
+                if let Some(a) = raw_authors.as_ref() {
+                    if a.contains(&empty_string) {
+                        return Err(serde::de::Error::invalid_type(
+                            Unexpected::Other("prefix matches must not be empty strings"),
+                            &"a json object"));
+                    }
+                }
+                rf.authors = raw_authors;
             } else if key.starts_with('#') && key.len() > 1 && val.is_array() {
-                // remove the prefix
-                let tagname = &key[1..];
-                if ts.is_none() {
-                    // Initialize the tag if necessary
-                    ts = Some(HashMap::new());
-                }
-                if let Some(m) = ts.as_mut() {
-                    let tag_vals: Option<Vec<String>> = Deserialize::deserialize(val).ok();
-                    if let Some(v) = tag_vals {
-                        let hs = HashSet::from_iter(v.into_iter());
-                        m.insert(tagname.to_owned(), hs);
+                if let Some(tag_search) = tag_search_char_from_filter(key) {
+                    if ts.is_none() {
+                        // Initialize the tag if necessary
+                        ts = Some(HashMap::new());
+                    }
+                    if let Some(m) = ts.as_mut() {
+                        let tag_vals: Option<Vec<String>> = Deserialize::deserialize(val).ok();
+                        if let Some(v) = tag_vals {
+                            let hs = HashSet::from_iter(v.into_iter());
+                            m.insert(tag_search.to_owned(), hs);
+                        }
+                    }
+                } else {
+                    // tag search that is multi-character, don't add to subscription
+                    rf.force_no_match = true;
+                    continue;
                 }
             }
         }
         rf.tags = ts;
@@ -96,6 +123,26 @@
     }
 }
 
+/// Attempt to form a single-char identifier from a tag search filter
+fn tag_search_char_from_filter(tagname: &str) -> Option<char> {
+    let tagname_nohash = &tagname[1..];
+    // We return the tag character if and only if the tagname consists
+    // of a single char.
+    let mut tagnamechars = tagname_nohash.chars();
+    let firstchar = tagnamechars.next();
+    match firstchar {
+        Some(_) => {
+            // check second char
+            if tagnamechars.next().is_none() {
+                firstchar
+            } else {
+                None
+            }
+        }
+        None => None,
+    }
+}
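Behavior of this helper on a few inputs; the values follow directly from the code above:

#[test]
fn tag_search_char_examples() {
    assert_eq!(tag_search_char_from_filter("#e"), Some('e'));
    assert_eq!(tag_search_char_from_filter("#p"), Some('p'));
    assert_eq!(tag_search_char_from_filter("#ref"), None); // multi-char tag name
}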
 
 impl<'de> Deserialize<'de> for Subscription {
     /// Custom deserializer for subscriptions, which have a more
     /// complex structure than the other message types.
@@ -190,11 +237,22 @@ impl ReqFilter {
             .unwrap_or(true)
     }
 
+    fn delegated_authors_match(&self, event: &Event) -> bool {
+        if let Some(delegated_pubkey) = &event.delegated_by {
+            self.authors
+                .as_ref()
+                .map(|vs| prefix_match(vs, delegated_pubkey))
+                .unwrap_or(true)
+        } else {
+            false
+        }
+    }
+
     fn tag_match(&self, event: &Event) -> bool {
         // get the hashset from the filter.
         if let Some(map) = &self.tags {
             for (key, val) in map.iter() {
-                let tag_match = event.generic_tag_val_intersect(key, val);
+                let tag_match = event.generic_tag_val_intersect(*key, val);
                 // if there is no match for this tag, the match fails.
                 if !tag_match {
                     return false;
@@ -221,8 +279,9 @@ impl ReqFilter {
             && self.since.map(|t| event.created_at > t).unwrap_or(true)
             && self.until.map(|t| event.created_at < t).unwrap_or(true)
             && self.kind_match(event.kind)
-            && self.authors_match(event)
+            && (self.authors_match(event) || self.delegated_authors_match(event))
             && self.tag_match(event)
+            && !self.force_no_match
     }
 }
 
@@ -252,6 +311,24 @@ mod tests {
         assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
     }
 
+    #[test]
+    fn req_empty_authors_prefix() {
+        let raw_json = "[\"REQ\",\"some-id\",{\"authors\": [\"\"]}]";
+        assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
+    }
+
+    #[test]
+    fn req_empty_ids_prefix() {
+        let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\"]}]";
+        assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
+    }
+
+    #[test]
+    fn req_empty_ids_prefix_mixed() {
+        let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\",\"aaa\"]}]";
+        assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
+    }
+
     #[test]
     fn legacy_filter() {
         // legacy field in filter
@@ -280,6 +357,7 @@ mod tests {
         let e = Event {
             id: "foo".to_owned(),
             pubkey: "abcd".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),
@@ -298,6 +376,7 @@ mod tests {
         let e = Event {
             id: "abcd".to_owned(),
             pubkey: "".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),
@@ -316,6 +395,7 @@ mod tests {
         let e = Event {
             id: "abcde".to_owned(),
             pubkey: "".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),
@@ -335,6 +415,7 @@ mod tests {
         let e = Event {
             id: "abc".to_owned(),
             pubkey: "".to_owned(),
+            delegated_by: None,
             created_at: 50,
             kind: 0,
             tags: Vec::new(),
@@ -358,6 +439,7 @@ mod tests {
         let e = Event {
             id: "abc".to_owned(),
             pubkey: "".to_owned(),
+            delegated_by: None,
             created_at: 150,
             kind: 0,
             tags: Vec::new(),
@@ -379,6 +461,7 @@ mod tests {
         let e = Event {
             id: "abc".to_owned(),
             pubkey: "".to_owned(),
+            delegated_by: None,
             created_at: 50,
             kind: 0,
             tags: Vec::new(),
@@ -397,6 +480,7 @@ mod tests {
         let e = Event {
             id: "abc".to_owned(),
             pubkey: "".to_owned(),
+            delegated_by: None,
             created_at: 1001,
             kind: 0,
             tags: Vec::new(),
@@ -415,6 +499,7 @@ mod tests {
         let e = Event {
             id: "abc".to_owned(),
             pubkey: "".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),
@@ -433,6 +518,7 @@ mod tests {
         let e = Event {
             id: "123".to_owned(),
             pubkey: "abc".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),
@@ -443,14 +529,15 @@ mod tests {
         assert!(s.interested_in_event(&e));
         Ok(())
     }
-    #[test]
 
+    #[test]
     fn authors_multi_pubkey() -> Result<()> {
         // check for any of a set of authors, against the pubkey
         let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"authors":["abc", "bcd"]}]"#)?;
         let e = Event {
             id: "123".to_owned(),
             pubkey: "bcd".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),
@@ -469,6 +556,7 @@ mod tests {
         let e = Event {
             id: "123".to_owned(),
             pubkey: "xyz".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),

src/utils.rs (18 lines changed)
@@ -13,3 +13,21 @@ pub fn unix_time() -> u64 {
 pub fn is_hex(s: &str) -> bool {
     s.chars().all(|x| char::is_ascii_hexdigit(&x))
 }
+
+/// Check if a string contains only lower-case hex chars.
+pub fn is_lower_hex(s: &str) -> bool {
+    s.chars().all(|x| {
+        (char::is_ascii_lowercase(&x) || char::is_ascii_digit(&x)) && char::is_ascii_hexdigit(&x)
+    })
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn lower_hex() {
+        let hexstr = "abcd0123";
+        assert_eq!(is_lower_hex(hexstr), true);
+    }
+}
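The distinction between the two predicates is what the BLOB storage rule in the migrations above relies on; a quick illustration:

#[test]
fn hex_predicates_differ() {
    assert!(is_hex("ABCD0123"));         // mixed-case hex is still hex
    assert!(!is_lower_hex("ABCD0123"));  // but it is not lower-case hex
    assert!(is_lower_hex("abcd0123"));
}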

tests/common/mod.rs (new file, 110 lines)

use anyhow::{anyhow, Result};
use nostr_rs_relay::config;
use nostr_rs_relay::server::start_server;
//use http::{Request, Response};
use hyper::{Client, StatusCode, Uri};
use std::net::TcpListener;
use std::sync::atomic::{AtomicU16, Ordering};
use std::sync::mpsc as syncmpsc;
use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
use std::thread;
use std::thread::JoinHandle;
use std::time::Duration;
use tracing::{debug, info};

pub struct Relay {
    pub port: u16,
    pub handle: JoinHandle<()>,
    pub shutdown_tx: MpscSender<()>,
}

pub fn start_relay() -> Result<Relay> {
    // setup tracing
    let _trace_sub = tracing_subscriber::fmt::try_init();
    info!("Starting a new relay");
    // replace default settings
    let mut settings = config::Settings::default();
    // identify open port
    info!("Checking for address...");
    let port = get_available_port().unwrap();
    info!("Found open port: {}", port);
    // bind to local interface only
    settings.network.address = "127.0.0.1".to_owned();
    settings.network.port = port;
    // create an in-memory DB with multiple readers
    settings.database.in_memory = true;
    settings.database.min_conn = 4;
    settings.database.max_conn = 8;
    let (shutdown_tx, shutdown_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
    let handle = thread::spawn(|| {
        // server will block the thread it is run on.
        let _ = start_server(settings, shutdown_rx);
    });
    // how do we know the relay has finished starting up?
    Ok(Relay {
        port,
        handle,
        shutdown_tx,
    })
}

// check if the server is healthy via HTTP request
async fn server_ready(relay: &Relay) -> Result<bool> {
    let uri: String = format!("http://127.0.0.1:{}/", relay.port);
    let client = Client::new();
    let uri: Uri = uri.parse().unwrap();
    let res = client.get(uri).await?;
    Ok(res.status() == StatusCode::OK)
}

pub async fn wait_for_healthy_relay(relay: &Relay) -> Result<()> {
    // TODO: maximum time to wait for server to become healthy.
    // give it a little time to start up before we start polling
    tokio::time::sleep(Duration::from_millis(10)).await;
    loop {
        let server_check = server_ready(relay).await;
        match server_check {
            Ok(true) => {
                // server responded with 200-OK.
                break;
            }
            Ok(false) => {
                // server responded with an error, we're done.
                return Err(anyhow!("Got non-200-OK from relay"));
            }
            Err(_) => {
                // server is not yet ready, probably connection refused...
                debug!("Relay not ready, will try again...");
                tokio::time::sleep(Duration::from_millis(10)).await;
            }
        }
    }
    info!("relay is ready");
    Ok(())
    // simple message sent to web browsers
    //let mut request = Request::builder()
    //    .uri("https://www.rust-lang.org/")
    //    .header("User-Agent", "my-awesome-agent/1.0");
}

// from https://elliotekj.com/posts/2017/07/25/find-available-tcp-port-rust/
// This needed some modification; if multiple tasks all ask for open ports, they will tend to get the same one.
// instead we should try these incrementally/globally.

static PORT_COUNTER: AtomicU16 = AtomicU16::new(4030);

fn get_available_port() -> Option<u16> {
    let startsearch = PORT_COUNTER.fetch_add(10, Ordering::SeqCst);
    if startsearch >= 20000 {
        // wrap around
        PORT_COUNTER.store(4030, Ordering::Relaxed);
    }
    (startsearch..20000).find(|port| port_is_available(*port))
}

pub fn port_is_available(port: u16) -> bool {
    info!("checking on port {}", port);
    match TcpListener::bind(("127.0.0.1", port)) {
        Ok(_) => true,
        Err(_) => false,
    }
}

tests/integration_test.rs (new file, 47 lines)

use anyhow::Result;

use std::thread;
use std::time::Duration;

mod common;

#[tokio::test]
async fn start_and_stop() -> Result<()> {
    // this will be the common pattern for acquiring a new relay:
    // start a fresh relay, on a port to-be-provided back to us:
    let relay = common::start_relay()?;
    // wait for the relay's webserver to start up and deliver a page:
    common::wait_for_healthy_relay(&relay).await?;
    let port = relay.port;
    // just make sure we can startup and shut down.
    // if we send a shutdown message before the server is listening,
    // we will get a SendError.  Keep sending until someone is
    // listening.
    loop {
        let shutdown_res = relay.shutdown_tx.send(());
        match shutdown_res {
            Ok(()) => {
                break;
            }
            Err(_) => {
                thread::sleep(Duration::from_millis(100));
            }
        }
    }
    // wait for relay to shutdown
    let thread_join = relay.handle.join();
    assert!(thread_join.is_ok());
    // assert that port is now available.
    assert!(common::port_is_available(port));
    Ok(())
}

#[tokio::test]
async fn relay_home_page() -> Result<()> {
    // get a relay and wait for startup...
    let relay = common::start_relay()?;
    common::wait_for_healthy_relay(&relay).await?;
    // tell relay to shutdown
    let _res = relay.shutdown_tx.send(());
    Ok(())
}
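A possible follow-on test, sketched here rather than present in the suite: open a websocket to the relay with tokio-tungstenite (already a dependency of the server) and issue a REQ.

use futures::{SinkExt, StreamExt};
use tokio_tungstenite::connect_async;
use tungstenite::protocol::Message;

// Sketch only: connects to a running relay and sends one REQ frame.
async fn probe_relay(port: u16) -> anyhow::Result<()> {
    let (mut ws, _resp) = connect_async(format!("ws://127.0.0.1:{}/", port)).await?;
    ws.send(Message::Text(r#"["REQ","probe",{"limit":1}]"#.into())).await?;
    if let Some(msg) = ws.next().await {
        println!("relay replied: {:?}", msg?);
    }
    Ok(())
}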
|
Reference in New Issue
Block a user