Mirror of https://github.com/scsibug/nostr-rs-relay.git (synced 2025-09-01 11:40:48 -04:00)

Compare commits: 184 commits
SHA1:
95748647f0 25480e837f b80b54cd9d 8ea732cbe5 0f68c4e5c2 dab2cd5792 f411aa6fc2 d31bbda087
5917bc53b2 91177c61a1 53c2a8051c 168cf513ac ea204761c9 c270ae1434 64bd983cb6 1c153bc784
dc11d9a619 cd1557787b 86bb7aeb9a ce37fc1a2d 2cfd384339 8c013107f9 64a4466d30 1596c23eb4
129badd4e1 6f7c080180 af92561ef6 d833a3e40d 462eb46642 cf144d503d fb8375aef2 88ac31b549
677b7d39e9 b24d2f9aaa 7a3899d852 818108b793 d10348f7e1 8598e443d8 43222d44e5 7c1516c4fb
0c72053a49 3f32ff67ab 0b9778d6ca 9be04120c7 cc06167e06 b6e33f044f 1b2c6f9fca 0d8d39ad22
0e851d4f71 3c880b2f49 7a4c9266ec e8557d421b 7ca9c864f2 838aafd079 e554b10ac2 b0bfaa48fc
2e9b1b6ba7 4d9012d94c ffe7aac066 f9695bd0a9 7c4bf5cc8f e2de162931 4f606615eb 84a58ebbcd
c48e45686d bbe359364a 9e9c494367 5fa24bc9f1 4de7490d97 d0f63dc66e 06078648c8 cc0fcc5d66
dfb2096653 486508d192 84b43c144b 110500bb46 83f6b11de7 6d1244434b 5a91419d34 7adc5c9af7
9dd4571bee 9db5a26b9c ac345b5744 675662c7fb 505b0cb71f e8aa450802 5a8860bb09 11e43eccf9
50577b2dfa a6cb6f8486 ae5bf98d87 1cf9d719f0 311f4b5283 14b5a51e3a 8ecce3f566 caffbbbede
81045ad3d0 72f8a1aa5c 274c61bb72 6eeefbcc4c 3e8adf978f 2af5f9fbe8 2739e49362 f9693f7ac3
8a63d88b0b a4df9445b6 92da9d71f8 6633f8b472 93dfed0a87 bef7ca7e27 a98708ba47 ccf9b8d47b
8fa58de49a 480c5e4e58 5bd00f9107 36b9f628c7 baeb77af99 29b1e8ce58 786a354776 4fa8616c73
74802522c2 9ce5057af8 217429f538 62a9548c27 c24dce8177 3503cf05ed 8738e5baa9 78da92ccca
72f1c19b21 283967f8cc 08b011ad07 2b03f11e5e e48bae10e6 8774416b92 59933ce25e 1b9f364e15
4d983dd1e0 11c33582ef a754477a02 a843eaa939 03a130b0b8 9124f4540a 77892b2064 4fe6191aa3
79a982e3ef 01d81db617 e6fef37d4e 4bbfd77fc1 8da6f6555a 5bcc63bd56 035cf34673 be8170342e
0a3b15f41f 2b4b17dbda 5058d98ad6 f4ecd43708 a8f465fdc8 1c14adc766 e894a86566 bedc378624
e1c2a6b758 990bb656e8 168cfc3b26 a36ad378f6 538d139ebf 23f7730fea 8aa1256254 9ed3391b46
4ad483090e 9b351aab9b 597749890e 1d499cf12b ed3a6b9692 048199e30b 414e83f696 225c8f762e
887fc28ab2 294d3b99c3 53990672ae 9c1b21cbfe 2f63417646 3b25160852 34ad549cde f8b1fe5035
.build.yml (new file, 19 lines)
@@ -0,0 +1,19 @@
+image: fedora/latest
+arch: x86_64
+artifacts:
+  - nostr-rs-relay/target/release/nostr-rs-relay
+environment:
+  RUST_LOG: debug
+packages:
+  - cargo
+  - sqlite-devel
+sources:
+  - https://git.sr.ht/~gheartsfield/nostr-rs-relay/
+shell: false
+tasks:
+  - build: |
+      cd nostr-rs-relay
+      cargo build --release
+  - test: |
+      cd nostr-rs-relay
+      cargo test --release
.cargo/config.toml (new file, 2 lines)
@@ -0,0 +1,2 @@
+[build]
+rustflags = ["--cfg", "tokio_unstable"]
.pre-commit-config.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.3.0
+    hooks:
+      - id: trailing-whitespace
+      - id: end-of-file-fixer
+      - id: check-yaml
+      - id: check-added-large-files
+  - repo: https://github.com/doublify/pre-commit-rust
+    rev: v1.0
+    hooks:
+      - id: fmt
+      - id: cargo-check
+      - id: clippy
Cargo.lock (generated, 1601 lines changed)
File diff suppressed because it is too large
Cargo.toml (66 lines changed)
@@ -1,32 +1,46 @@
 [package]
 name = "nostr-rs-relay"
-version = "0.5.1"
+version = "0.7.13"
 edition = "2021"
+authors = ["Greg Heartsfield <scsibug@imap.cc>"]
+description = "A relay implementation for the Nostr protocol"
+readme = "README.md"
+homepage = "https://sr.ht/~gheartsfield/nostr-rs-relay/"
+repository = "https://git.sr.ht/~gheartsfield/nostr-rs-relay"
+license = "MIT"
+keywords = ["nostr", "server"]
+categories = ["network-programming", "web-programming"]
 
 [dependencies]
-log = "^0.4"
-env_logger = "^0.9"
-tokio = { version = "^1.16", features = ["full"] }
-futures = "^0.3"
-futures-util = "^0.3"
-tokio-tungstenite = "^0.16"
-tungstenite = "^0.16"
-thiserror = "^1"
-uuid = { version = "^0.8", features = ["v4"] }
-config = { version = "0.11", features = ["toml"] }
-bitcoin_hashes = { version = "^0.9", features = ["serde"] }
-secp256k1 = {version = "^0.21", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] }
-serde = { version = "^1.0", features = ["derive"] }
-serde_json = {version = "^1.0", features = ["preserve_order"]}
-hex = "^0.4"
-rusqlite = { version = "^0.26", features = ["limits"]}
-r2d2 = "^0.8"
-r2d2_sqlite = "^0.19"
-lazy_static = "^1.4"
-governor = "^0.4"
-nonzero_ext = "^0.3"
+tracing = "0.1.36"
+tracing-subscriber = "0.2.0"
+tokio = { version = "1", features = ["full", "tracing", "signal"] }
+console-subscriber = "0.1.8"
+futures = "0.3"
+futures-util = "0.3"
+tokio-tungstenite = "0.17"
+tungstenite = "0.17"
+thiserror = "1"
+uuid = { version = "1.1.2", features = ["v4"] }
+config = { version = "0.12", features = ["toml"] }
+bitcoin_hashes = { version = "0.10", features = ["serde"] }
+secp256k1 = {version = "0.21", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] }
+serde = { version = "1.0", features = ["derive"] }
+serde_json = {version = "1.0", features = ["preserve_order"]}
+hex = "0.4"
+rusqlite = { version = "0.26", features = ["limits","bundled"]}
+r2d2 = "0.8"
+r2d2_sqlite = "0.19"
+lazy_static = "1.4"
+governor = "0.4"
+nonzero_ext = "0.3"
 hyper = { version="0.14", features=["client", "server","http1","http2","tcp"] }
-hyper-tls = "^0.5"
-http = { version = "^0.2" }
-parse_duration = "^2"
-rand = "^0.8"
+hyper-tls = "0.5"
+http = { version = "0.2" }
+parse_duration = "2"
+rand = "0.8"
+const_format = "0.2.28"
+regex = "1"
+
+[dev-dependencies]
+anyhow = "1"
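Note: the new `console-subscriber` dependency works together with the `tokio_unstable` rustflag added in `.cargo/config.toml` above and the `[diagnostics]` switch added to config.toml further down. A minimal sketch of how such a toggle can be wired at startup; the function name and flag here are illustrative, not the relay's actual code:

```rust
// Sketch only: pick a tracing backend at startup from a boolean
// diagnostics flag.  Building with the console backend requires
// RUSTFLAGS="--cfg tokio_unstable" (see .cargo/config.toml above).
fn init_diagnostics(tokio_console: bool) {
    if tokio_console {
        // stream task/poll telemetry to the `tokio-console` CLI
        console_subscriber::init();
    } else {
        // plain human-readable log output
        tracing_subscriber::fmt().init();
    }
}
```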
Dockerfile (16 lines changed)
@@ -1,18 +1,24 @@
-FROM rust:1.58.1 as builder
+FROM docker.io/library/rust:1.66.0@sha256:359949280cebefe93ccb33089fe25111a3aadfe99eac4b6cbe8ec3e1b571dacb as builder
+
+RUN USER=root cargo install cargo-auditable
 RUN USER=root cargo new --bin nostr-rs-relay
 WORKDIR ./nostr-rs-relay
 COPY ./Cargo.toml ./Cargo.toml
 COPY ./Cargo.lock ./Cargo.lock
-RUN cargo build --release
+# build dependencies only (caching)
+RUN cargo auditable build --release --locked
+# get rid of starter project code
 RUN rm src/*.rs
+
+# copy project source code
 COPY ./src ./src
+
+# build auditable release using locked deps
 RUN rm ./target/release/deps/nostr*relay*
-RUN cargo build --release
+RUN cargo auditable build --release --locked
 
-FROM debian:bullseye-20220125-slim
+FROM docker.io/library/debian:bullseye-20221205-slim@sha256:25f10b4f1ded5341a3ca0a30290ff3cd5639415f0c5a2222d5e7d5dd72952aa1
+
 ARG APP=/usr/src/app
 ARG APP_DATA=/usr/src/app/db
 RUN apt-get update \
@@ -36,7 +42,7 @@ RUN chown -R $APP_USER:$APP_USER ${APP}
 USER $APP_USER
 WORKDIR ${APP}
 
-ENV RUST_LOG=info
+ENV RUST_LOG=info,nostr_rs_relay=info
 ENV APP_DATA=${APP_DATA}
 
 CMD ./nostr-rs-relay --db ${APP_DATA}
README.md (72 lines changed)
@@ -1,26 +1,34 @@
 # [nostr-rs-relay](https://git.sr.ht/~gheartsfield/nostr-rs-relay)
 
-This is a [nostr](https://github.com/fiatjaf/nostr) relay, written in
-Rust. It currently supports the entire relay protocol, and has a
-SQLite persistence layer.
+This is a [nostr](https://github.com/nostr-protocol/nostr) relay,
+written in Rust. It currently supports the entire relay protocol, and
+persists data with SQLite.
 
 The project master repository is available on
 [sourcehut](https://sr.ht/~gheartsfield/nostr-rs-relay/), and is
 mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay).
 
+[](https://builds.sr.ht/~gheartsfield/nostr-rs-relay/commits/master?)
+
 ## Features
 
-NIPs with a relay-specific implementation are listed here.
+[NIPs](https://github.com/nostr-protocol/nips) with a relay-specific implementation are listed here.
 
-- [x] NIP-01: Core event model
-- [x] NIP-01: Hide old metadata events
-- [x] NIP-01: Id/Author prefix search (_experimental_)
-- [x] NIP-02: Hide old contact list events
-- [ ] NIP-03: OpenTimestamps
-- [x] NIP-05: Mapping Nostr keys to DNS identifiers
-- [ ] NIP-09: Event deletion
-- [x] NIP-11: Relay information document
-- [x] NIP-12: Generic tag search (_experimental_)
+- [x] NIP-01: [Basic protocol flow description](https://github.com/nostr-protocol/nips/blob/master/01.md)
+  * Core event model
+  * Hide old metadata events
+  * Id/Author prefix search
+- [x] NIP-02: [Contact List and Petnames](https://github.com/nostr-protocol/nips/blob/master/02.md)
+- [ ] NIP-03: [OpenTimestamps Attestations for Events](https://github.com/nostr-protocol/nips/blob/master/03.md)
+- [x] NIP-05: [Mapping Nostr keys to DNS-based internet identifiers](https://github.com/nostr-protocol/nips/blob/master/05.md)
+- [x] NIP-09: [Event Deletion](https://github.com/nostr-protocol/nips/blob/master/09.md)
+- [x] NIP-11: [Relay Information Document](https://github.com/nostr-protocol/nips/blob/master/11.md)
+- [x] NIP-12: [Generic Tag Queries](https://github.com/nostr-protocol/nips/blob/master/12.md)
+- [x] NIP-15: [End of Stored Events Notice](https://github.com/nostr-protocol/nips/blob/master/15.md)
+- [x] NIP-16: [Event Treatment](https://github.com/nostr-protocol/nips/blob/master/16.md)
+- [x] NIP-20: [Command Results](https://github.com/nostr-protocol/nips/blob/master/20.md)
+- [x] NIP-22: [Event `created_at` limits](https://github.com/nostr-protocol/nips/blob/master/22.md) (_future-dated events only_)
+- [x] NIP-26: [Event Delegation](https://github.com/nostr-protocol/nips/blob/master/26.md)
 
 ## Quick Start
 
@@ -29,15 +37,32 @@ application. Use a bind mount to store the SQLite database outside of
 the container image, and map the container's 8080 port to a host port
 (7000 in the example below).
 
+The examples below start a rootless podman container, mapping a local
+data directory and config file.
+
 ```console
-$ docker build -t nostr-rs-relay .
+$ podman build -t nostr-rs-relay .
 
-$ docker run -it -p 7000:8080 \
-  --mount src=$(pwd)/data,target=/usr/src/app/db,type=bind nostr-rs-relay
+$ mkdir data
 
-[2021-12-31T19:58:31Z INFO nostr_rs_relay] listening on: 0.0.0.0:8080
-[2021-12-31T19:58:31Z INFO nostr_rs_relay::db] opened database "/usr/src/app/db/nostr.db" for writing
-[2021-12-31T19:58:31Z INFO nostr_rs_relay::db] DB version = 2
+$ podman unshare chown 100:100 data
+
+$ podman run -it --rm -p 7000:8080 \
+  --user=100:100 \
+  -v $(pwd)/data:/usr/src/app/db:Z \
+  -v $(pwd)/config.toml:/usr/src/app/config.toml:ro,Z \
+  --name nostr-relay nostr-rs-relay:latest
+
+Nov 19 15:31:15.013 INFO nostr_rs_relay: Starting up from main
+Nov 19 15:31:15.017 INFO nostr_rs_relay::server: listening on: 0.0.0.0:8080
+Nov 19 15:31:15.019 INFO nostr_rs_relay::server: db writer created
+Nov 19 15:31:15.019 INFO nostr_rs_relay::server: control message listener started
+Nov 19 15:31:15.019 INFO nostr_rs_relay::db: Built a connection pool "event writer" (min=1, max=4)
+Nov 19 15:31:15.019 INFO nostr_rs_relay::db: opened database "/usr/src/app/db/nostr.db" for writing
+Nov 19 15:31:15.019 INFO nostr_rs_relay::schema: DB version = 0
+Nov 19 15:31:15.054 INFO nostr_rs_relay::schema: database pragma/schema initialized to v7, and ready
+Nov 19 15:31:15.054 INFO nostr_rs_relay::schema: All migration scripts completed successfully. Welcome to v7.
+Nov 19 15:31:15.521 INFO nostr_rs_relay::db: Built a connection pool "client query" (min=4, max=128)
 ```
 
 Use a `nostr` client such as
@@ -79,8 +104,13 @@ termination, load balancing, and other features), see [Reverse
 Proxy](reverse-proxy.md).
 
 ## Dev Channel
-The current dev discussions for this project is happening at https://discord.gg/ufG6fH52Vk.
-Drop in to query any development related questions.
+
+For development discussions, please feel free to use the [sourcehut
+mailing list](https://lists.sr.ht/~gheartsfield/nostr-rs-relay-devel).
+Or, drop by the [Nostr Telegram Channel](https://t.me/nostr_protocol).
+
+To chat about `nostr-rs-relay` on `nostr` itself; visit our channel on [anigma](https://anigma.io/) or another client that supports [NIP-28](https://github.com/nostr-protocol/nips/blob/master/28.md) chats:
+* `2ad246a094fee48c6e455dd13d759d5f41b5a233120f5719d81ebc1935075194`
 
 License
 ---
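Note: for a scripted smoke test of a running relay, a client along these lines should work with the `tokio-tungstenite`/`futures-util` versions pinned in Cargo.toml above; the `["REQ",...]` request and `["EOSE",...]` reply are the NIP-01/NIP-15 wire formats, and the port matches the quick-start mapping. This is a sketch, not part of the repository:

```rust
use futures_util::{SinkExt, StreamExt};
use tokio_tungstenite::{connect_async, tungstenite::Message};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // connect to the host port mapped in the quick start (7000 -> 8080)
    let (mut ws, _response) = connect_async("ws://127.0.0.1:7000").await?;
    // NIP-01 REQ: subscription id "demo", asking for one kind-1 event
    ws.send(Message::Text(r#"["REQ","demo",{"kinds":[1],"limit":1}]"#.into()))
        .await?;
    // print frames until the NIP-15 end-of-stored-events marker arrives
    while let Some(frame) = ws.next().await {
        let text = frame?.into_text()?;
        println!("{text}");
        if text.starts_with(r#"["EOSE""#) {
            break;
        }
    }
    Ok(())
}
```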
config.toml (45 lines changed)
@@ -16,12 +16,21 @@ description = "A newly created nostr-rs-relay.\n\nCustomize this with your own i
 # Administrative contact URI
 #contact = "mailto:contact@example.com"
 
+[diagnostics]
+# Enable tokio tracing (for use with tokio-console)
+#tracing = true
+
 [database]
 # Directory for SQLite files. Defaults to the current directory. Can
 # also be specified (and overriden) with the "--db dirname" command
 # line option.
 data_directory = "."
 
+# Use an in-memory database instead of 'nostr.db'.
+# Caution; this will not survive a process restart!
+#in_memory = false
+
 # Database connection pool settings for subscribers:
 
 # Minimum number of SQLite reader connections
@@ -37,16 +46,40 @@ address = "0.0.0.0"
 # Listen on this port
 port = 8080
 
+# If present, read this HTTP header for logging client IP addresses.
+# Examples for common proxies, cloudflare:
+#remote_ip_header = "x-forwarded-for"
+#remote_ip_header = "cf-connecting-ip"
+
+# Websocket ping interval in seconds, defaults to 5 minutes
+#ping_interval = 300
+
 [options]
 # Reject events that have timestamps greater than this many seconds in
-# the future. Defaults to rejecting anything greater than 30 minutes
-# from the current time.
+# the future. Recommended to reject anything greater than 30 minutes
+# from the current time, but the default is to allow any date.
 reject_future_seconds = 1800
 
 [limits]
 # Limit events created per second, averaged over one minute. Must be
-# an integer. If not set (or set to 0), defaults to unlimited.
-#messages_per_sec = 0
+# an integer. If not set (or set to 0), defaults to unlimited. Note:
+# this is for the server as a whole, not per-connection.
+# messages_per_sec = 0
+
+# Limit client subscriptions created per second, averaged over one
+# minute. Must be an integer. If not set (or set to 0), defaults to
+# unlimited.
+#subscriptions_per_min = 0
+
+# UNIMPLEMENTED...
+# Limit how many concurrent database connections a client can have.
+# This prevents a single client from starting too many expensive
+# database queries. Must be an integer. If not set (or set to 0),
+# defaults to unlimited (subject to subscription limits).
+#db_conns_per_client = 0
+
+# Limit blocking threads used for database connections. Defaults to 16.
+#max_blocking_threads = 16
+
 # Limit the maximum size of an EVENT message. Defaults to 128 KB.
 # Set to 0 for unlimited.
@@ -63,8 +96,8 @@ reject_future_seconds = 1800
 #broadcast_buffer = 16384
 
 # Event persistence buffer size, in number of events. This provides
-# backpressure to senders if writes are slow. Defaults to 16.
-#event_persist_buffer = 16
+# backpressure to senders if writes are slow.
+#event_persist_buffer = 4096
 
 [authorization]
 # Pubkey addresses in this array are whitelisted for event publishing.
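Note: the comment change above flips the documented default (no future-date limit unless configured), while the sample file still sets 1800 seconds. A sketch of the check this option implies, assuming Unix-second timestamps; the helper name is illustrative:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

// Sketch: with reject_future_seconds = Some(1800), an event stamped more
// than 30 minutes ahead of the server clock is rejected; None allows any date.
fn too_far_in_future(created_at: u64, reject_future_seconds: Option<u64>) -> bool {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_secs())
        .unwrap_or(0);
    reject_future_seconds
        .map(|limit| created_at > now.saturating_add(limit))
        .unwrap_or(false)
}
```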
mk-platform-agnostic-dockerfile.sh (new executable file, 3 lines)
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+sed -E 's/@sha256:[[:alnum:]]+//g' Dockerfile > Dockerfile.any-platform
+echo "Created platform-agnostic Dockerfile in 'Dockerfile.any-platform'"
reverse-proxy.md
@@ -1,8 +1,8 @@
 # Reverse Proxy Setup Guide
 
 It is recommended to run `nostr-rs-relay` behind a reverse proxy such
-as `haproxy` or `nginx` to provide TLS termination. A simple example
-of an `haproxy` configuration is documented here.
+as `haproxy` or `nginx` to provide TLS termination. Simple examples
+of `haproxy` and `nginx` configurations are documented here.
 
 ## Minimal HAProxy Configuration
 
@@ -46,8 +46,47 @@ backend relay
     server relay 127.0.0.1:8080
 ```
 
-### Notes
+### HAProxy Notes
 
 You may experience WebSocket connection problems with Firefox if
 HTTP/2 is enabled, for older versions of HAProxy (2.3.x). Either
 disable HTTP/2 (`h2`), or upgrade HAProxy.
+
+## Bare-bones Nginx Configuration
+
+Assumptions:
+
+* `Nginx` version is `1.18.0` (other versions not tested).
+* Hostname for the relay is `relay.example.com`.
+* SSL certificate and key are located at `/etc/letsencrypt/live/relay.example.com/`.
+* Relay is running on port `8080`.
+
+```
+http {
+    server {
+        listen 443 ssl;
+        server_name relay.example.com;
+        ssl_certificate /etc/letsencrypt/live/relay.example.com/fullchain.pem;
+        ssl_certificate_key /etc/letsencrypt/live/relay.example.com/privkey.pem;
+        ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
+        ssl_ciphers HIGH:!aNULL:!MD5;
+        keepalive_timeout 70;
+
+        location / {
+            proxy_pass http://localhost:8080;
+            proxy_http_version 1.1;
+            proxy_set_header Upgrade $http_upgrade;
+            proxy_set_header Connection "Upgrade";
+            proxy_set_header Host $host;
+        }
+    }
+}
+```
+
+### Nginx Notes
+
+The above configuration was tested with `nginx` `1.18.0` on `Ubuntu 20.04`.
+
+For help installing `nginx` on `Ubuntu`, see [this guide](https://www.digitalocean.com/community/tutorials/how-to-install-nginx-on-ubuntu-20-04).
+
+For guidance on using `letsencrypt` to obtain a cert on `Ubuntu`, including an `nginx` plugin, see [this post](https://www.digitalocean.com/community/tutorials/how-to-secure-nginx-with-let-s-encrypt-on-ubuntu-20-04).
rustfmt.toml
@@ -1 +1,4 @@
-edition = "2018"
+edition = "2021"
+#max_width = 140
+#chain_width = 100
+#fn_call_width = 100
src/close.rs (10 lines changed)
@@ -5,7 +5,7 @@ use crate::error::{Error, Result};
 use serde::{Deserialize, Serialize};
 
 /// Close command in network format
-#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
 pub struct CloseCmd {
     /// Protocol command, expected to always be "CLOSE".
     cmd: String,
@@ -14,7 +14,7 @@ pub struct CloseCmd {
 }
 
 /// Identifier of the subscription to be closed.
-#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
 pub struct Close {
     /// The subscription identifier being closed.
     pub id: String,
@@ -23,10 +23,10 @@ pub struct Close {
 impl From<CloseCmd> for Result<Close> {
     fn from(cc: CloseCmd) -> Result<Close> {
         // ensure command is correct
-        if cc.cmd != "CLOSE" {
-            Err(Error::CommandUnknownError)
-        } else {
+        if cc.cmd == "CLOSE" {
             Ok(Close { id: cc.id })
+        } else {
+            Err(Error::CommandUnknownError)
         }
     }
 }
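Note: the `From<CloseCmd>` change only reorders the branches so the success path reads first; accepted and rejected inputs are unchanged. A self-contained analogue of the conversion, with simplified stand-in types rather than the module's serde-derived ones:

```rust
// Illustrative stand-in for the module's CloseCmd type.
struct CloseCmd {
    cmd: String,
    id: String,
}

// Mirrors the refactored branch order: success first.
fn to_close(cc: CloseCmd) -> Result<String, &'static str> {
    if cc.cmd == "CLOSE" {
        Ok(cc.id)
    } else {
        Err("unknown command")
    }
}

fn main() {
    let ok = CloseCmd { cmd: "CLOSE".into(), id: "sub-1".into() };
    let bad = CloseCmd { cmd: "NOPE".into(), id: "sub-1".into() };
    assert_eq!(to_close(ok), Ok("sub-1".to_owned()));
    assert_eq!(to_close(bad), Err("unknown command"));
}
```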
src/config.rs
@@ -1,14 +1,8 @@
 //! Configuration file and settings management
-use lazy_static::lazy_static;
-use log::*;
+use config::{Config, ConfigError, File};
 use serde::{Deserialize, Serialize};
-use std::sync::RwLock;
 use std::time::Duration;
-
-// initialize a singleton default configuration
-lazy_static! {
-    pub static ref SETTINGS: RwLock<Settings> = RwLock::new(Settings::default());
-}
+use tracing::warn;
 
 #[derive(Debug, Serialize, Deserialize, Clone)]
 #[allow(unused)]
@@ -20,29 +14,31 @@ pub struct Info {
     pub contact: Option<String>,
 }
 
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Database {
     pub data_directory: String,
+    pub in_memory: bool,
     pub min_conn: u32,
     pub max_conn: u32,
 }
 
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Network {
     pub port: u16,
     pub address: String,
+    pub remote_ip_header: Option<String>, // retrieve client IP from this HTTP header if present
+    pub ping_interval_seconds: u32,
 }
 
-//
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Options {
     pub reject_future_seconds: Option<usize>, // if defined, reject any events with a timestamp more than X seconds in the future
 }
 
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Retention {
     // TODO: implement
@@ -52,10 +48,13 @@ pub struct Retention {
     pub whitelist_addresses: Option<Vec<String>>, // whitelisted addresses (never delete)
 }
 
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Limits {
     pub messages_per_sec: Option<u32>, // Artificially slow down event writing to limit disk consumption (averaged over 1 minute)
+    pub subscriptions_per_min: Option<u32>, // Artificially slow down request (db query) creation to prevent abuse (averaged over 1 minute)
+    pub db_conns_per_client: Option<u32>, // How many concurrent database queries (not subscriptions) may a client have?
+    pub max_blocking_threads: usize,
     pub max_event_bytes: Option<usize>, // Maximum size of an EVENT message
     pub max_ws_message_bytes: Option<usize>,
     pub max_ws_frame_bytes: Option<usize>,
@@ -63,13 +62,19 @@ pub struct Limits {
     pub event_persist_buffer: usize, // events to buffer for database commits (block senders if database writes are too slow)
 }
 
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Authorization {
     pub pubkey_whitelist: Option<Vec<String>>, // If present, only allow these pubkeys to publish events
 }
 
-#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct Diagnostics {
+    pub tracing: bool, // enables tokio console-subscriber
+}
+
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Copy)]
 #[serde(rename_all = "lowercase")]
 pub enum VerifiedUsersMode {
     Enabled,
@@ -77,7 +82,7 @@ pub enum VerifiedUsersMode {
     Disabled,
 }
 
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct VerifiedUsers {
     pub mode: VerifiedUsersMode, // Mode of operation: "enabled" (enforce) or "passive" (check only). If none, this is simply disabled.
@@ -96,37 +101,46 @@ impl VerifiedUsers {
         self.verify_update_frequency_duration = self.verify_update_duration();
     }
 
+    #[must_use]
     pub fn is_enabled(&self) -> bool {
         self.mode == VerifiedUsersMode::Enabled
     }
 
+    #[must_use]
     pub fn is_active(&self) -> bool {
         self.mode == VerifiedUsersMode::Enabled || self.mode == VerifiedUsersMode::Passive
    }
 
+    #[must_use]
     pub fn is_passive(&self) -> bool {
         self.mode == VerifiedUsersMode::Passive
     }
 
+    #[must_use]
     pub fn verify_expiration_duration(&self) -> Option<Duration> {
         self.verify_expiration
             .as_ref()
             .and_then(|x| parse_duration::parse(x).ok())
     }
 
+    #[must_use]
     pub fn verify_update_duration(&self) -> Option<Duration> {
         self.verify_update_frequency
             .as_ref()
             .and_then(|x| parse_duration::parse(x).ok())
     }
 
+    #[must_use]
     pub fn is_valid(&self) -> bool {
         self.verify_expiration_duration().is_some() && self.verify_update_duration().is_some()
     }
 }
 
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Settings {
     pub info: Info,
+    pub diagnostics: Diagnostics,
     pub database: Database,
     pub network: Network,
     pub limits: Limits,
@@ -137,39 +151,41 @@ pub struct Settings {
 }
 
 impl Settings {
+    #[must_use]
     pub fn new() -> Self {
-        let d = Self::default();
+        let default_settings = Self::default();
         // attempt to construct settings with file
-        // Self::new_from_default(&d).unwrap_or(d)
-        let from_file = Self::new_from_default(&d);
+        let from_file = Self::new_from_default(&default_settings);
         match from_file {
             Ok(f) => f,
             Err(e) => {
                 warn!("Error reading config file ({:?})", e);
-                d
+                default_settings
             }
         }
     }
 
-    fn new_from_default(default: &Settings) -> Result<Self, config::ConfigError> {
-        let config: config::Config = config::Config::new();
-        let mut settings: Settings = config
+    fn new_from_default(default: &Settings) -> Result<Self, ConfigError> {
+        let builder = Config::builder();
+        let config: Config = builder
             // use defaults
-            .with_merged(config::Config::try_from(default).unwrap())?
+            .add_source(Config::try_from(default)?)
             // override with file contents
-            .with_merged(config::File::with_name("config"))?
-            .try_into()?;
+            .add_source(File::with_name("config.toml"))
+            .build()?;
+        let mut settings: Settings = config.try_deserialize()?;
         // ensure connection pool size is logical
-        if settings.database.min_conn > settings.database.max_conn {
-            panic!(
-                "Database min_conn setting ({}) cannot exceed max_conn ({})",
-                settings.database.min_conn, settings.database.max_conn
-            );
-        }
+        assert!(
+            settings.database.min_conn <= settings.database.max_conn,
+            "Database min_conn setting ({}) cannot exceed max_conn ({})",
+            settings.database.min_conn,
+            settings.database.max_conn
+        );
         // ensure durations parse
-        if !settings.verified_users.is_valid() {
-            panic!("VerifiedUsers time settings could not be parsed");
-        }
+        assert!(
+            settings.verified_users.is_valid(),
+            "VerifiedUsers time settings could not be parsed"
+        );
         // initialize durations for verified users
         settings.verified_users.init();
         Ok(settings)
@@ -186,17 +202,24 @@ impl Default for Settings {
                 pubkey: None,
                 contact: None,
             },
+            diagnostics: Diagnostics { tracing: false },
             database: Database {
                 data_directory: ".".to_owned(),
+                in_memory: false,
                 min_conn: 4,
                 max_conn: 128,
             },
             network: Network {
                 port: 8080,
+                ping_interval_seconds: 300,
                 address: "0.0.0.0".to_owned(),
+                remote_ip_header: None,
            },
             limits: Limits {
                 messages_per_sec: None,
+                subscriptions_per_min: None,
+                db_conns_per_client: None,
+                max_blocking_threads: 16,
                 max_event_bytes: Some(2 << 17), // 128K
                 max_ws_message_bytes: Some(2 << 17), // 128K
                 max_ws_frame_bytes: Some(2 << 17), // 128K
@@ -223,7 +246,7 @@ impl Default for Settings {
                 whitelist_addresses: None, // whitelisted addresses (never delete)
             },
             options: Options {
-                reject_future_seconds: Some(30 * 60), // Reject events 30min in the future or greater
+                reject_future_seconds: None, // Reject events in the future if defined
             },
         }
     }
 }
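Note: besides dropping the `lazy_static` SETTINGS singleton, this file migrates from the mutable `Config::new()`/`with_merged` API of `config` 0.11 to the 0.12 builder. The layering idea in isolation (serialized defaults first, file overrides second); the `Example` struct is made up for the sketch, not the relay's `Settings`:

```rust
use config::{Config, ConfigError, File};
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct Example {
    port: u16,
}

fn load(defaults: &Example) -> Result<Example, ConfigError> {
    Config::builder()
        // lowest precedence: the serialized defaults
        .add_source(Config::try_from(defaults)?)
        // higher precedence: config.toml, when present
        .add_source(File::with_name("config.toml").required(false))
        .build()?
        .try_deserialize()
}

fn main() {
    let settings = load(&Example { port: 8080 }).expect("config should load");
    println!("listening on port {}", settings.port);
}
```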
src/conn.rs (63 lines changed)
@@ -2,11 +2,10 @@
 use crate::close::Close;
 use crate::error::Error;
 use crate::error::Result;
-use crate::event::Event;
 use crate::subscription::Subscription;
-use log::*;
 use std::collections::HashMap;
+use tracing::{debug, trace};
 use uuid::Uuid;
 
 /// A subscription identifier has a maximum length
@@ -14,6 +13,8 @@ const MAX_SUBSCRIPTION_ID_LEN: usize = 256;
 
 /// State for a client connection
 pub struct ClientConn {
+    /// Client IP (either from socket, or configured proxy header
+    client_ip: String,
     /// Unique client identifier generated at connection time
     client_id: Uuid,
     /// The current set of active client subscriptions
@@ -24,46 +25,56 @@ pub struct ClientConn {
 
 impl Default for ClientConn {
     fn default() -> Self {
-        Self::new()
+        Self::new("unknown".to_owned())
     }
 }
 
 impl ClientConn {
     /// Create a new, empty connection state.
-    pub fn new() -> Self {
+    #[must_use]
+    pub fn new(client_ip: String) -> Self {
         let client_id = Uuid::new_v4();
         ClientConn {
+            client_ip,
             client_id,
             subscriptions: HashMap::new(),
             max_subs: 32,
         }
     }
 
+    pub fn subscriptions(&self) -> &HashMap<String, Subscription> {
+        &self.subscriptions
+    }
+
+    /// Check if the given subscription already exists
+    pub fn has_subscription(&self, sub: &Subscription) -> bool {
+        self.subscriptions.values().any(|x| x == sub)
+    }
+
     /// Get a short prefix of the client's unique identifier, suitable
     /// for logging.
+    #[must_use]
     pub fn get_client_prefix(&self) -> String {
         self.client_id.to_string().chars().take(8).collect()
     }
 
-    /// Find all matching subscriptions.
-    pub fn get_matching_subscriptions(&self, e: &Event) -> Vec<&str> {
-        let mut v: Vec<&str> = vec![];
-        for (id, sub) in self.subscriptions.iter() {
-            if sub.interested_in_event(e) {
-                v.push(id);
-            }
-        }
-        v
+    #[must_use]
+    pub fn ip(&self) -> &str {
+        &self.client_ip
     }
 
     /// Add a new subscription for this connection.
+    /// # Errors
+    ///
+    /// Will return `Err` if the client has too many subscriptions, or
+    /// if the provided name is excessively long.
     pub fn subscribe(&mut self, s: Subscription) -> Result<()> {
         let k = s.get_id();
         let sub_id_len = k.len();
         // prevent arbitrarily long subscription identifiers from
         // being used.
         if sub_id_len > MAX_SUBSCRIPTION_ID_LEN {
-            info!(
+            debug!(
                 "ignoring sub request with excessive length: ({})",
                 sub_id_len
             );
@@ -72,8 +83,12 @@ impl ClientConn {
         // check if an existing subscription exists, and replace if so
         if self.subscriptions.contains_key(&k) {
             self.subscriptions.remove(&k);
-            self.subscriptions.insert(k, s);
-            debug!("replaced existing subscription");
+            self.subscriptions.insert(k, s.clone());
+            trace!(
+                "replaced existing subscription (cid: {}, sub: {:?})",
+                self.get_client_prefix(),
+                s.get_id()
+            );
             return Ok(());
         }
 
@@ -83,20 +98,22 @@ impl ClientConn {
         }
         // add subscription
         self.subscriptions.insert(k, s);
-        debug!(
-            "registered new subscription, currently have {} active subs",
-            self.subscriptions.len()
+        trace!(
+            "registered new subscription, currently have {} active subs (cid: {})",
+            self.subscriptions.len(),
+            self.get_client_prefix(),
         );
         Ok(())
     }
 
     /// Remove the subscription for this connection.
-    pub fn unsubscribe(&mut self, c: Close) {
+    pub fn unsubscribe(&mut self, c: &Close) {
         // TODO: return notice if subscription did not exist.
         self.subscriptions.remove(&c.id);
-        debug!(
-            "removed subscription, currently have {} active subs",
-            self.subscriptions.len()
+        trace!(
+            "removed subscription, currently have {} active subs (cid: {})",
+            self.subscriptions.len(),
+            self.get_client_prefix(),
         );
     }
 }
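Note: the subscription bookkeeping above follows two rules: re-using an id replaces the old subscription without counting against `max_subs`, and a brand-new id is refused once the cap is reached. The same logic as a standalone sketch, with a plain `String` standing in for the real `Subscription` type:

```rust
use std::collections::HashMap;

const MAX_SUBS: usize = 32; // mirrors ClientConn's default cap

fn subscribe(
    subs: &mut HashMap<String, String>,
    id: &str,
    filter: String,
) -> Result<(), &'static str> {
    if subs.contains_key(id) {
        // same id: replace in place, never counted as a new subscription
        subs.insert(id.to_owned(), filter);
        return Ok(());
    }
    if subs.len() >= MAX_SUBS {
        return Err("max subscriptions reached");
    }
    subs.insert(id.to_owned(), filter);
    Ok(())
}

fn main() {
    let mut subs = HashMap::new();
    subscribe(&mut subs, "sub-1", r#"{"kinds":[1]}"#.into()).unwrap();
    subscribe(&mut subs, "sub-1", r#"{"kinds":[0,1]}"#.into()).unwrap(); // replaced
    assert_eq!(subs.len(), 1);
}
```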
759
src/db.rs
759
src/db.rs
@@ -1,29 +1,31 @@
|
|||||||
//! Event persistence and querying
|
//! Event persistence and querying
|
||||||
use crate::config::SETTINGS;
|
//use crate::config::SETTINGS;
|
||||||
use crate::error::Error;
|
use crate::config::Settings;
|
||||||
use crate::error::Result;
|
use crate::error::{Error, Result};
|
||||||
use crate::event::Event;
|
use crate::event::{single_char_tagname, Event};
|
||||||
use crate::hexrange::hex_range;
|
use crate::hexrange::hex_range;
|
||||||
use crate::hexrange::HexSearch;
|
use crate::hexrange::HexSearch;
|
||||||
use crate::nip05;
|
use crate::nip05;
|
||||||
|
use crate::notice::Notice;
|
||||||
use crate::schema::{upgrade_db, STARTUP_SQL};
|
use crate::schema::{upgrade_db, STARTUP_SQL};
|
||||||
|
use crate::subscription::ReqFilter;
|
||||||
use crate::subscription::Subscription;
|
use crate::subscription::Subscription;
|
||||||
use crate::utils::is_hex;
|
use crate::utils::{is_hex, is_lower_hex};
|
||||||
use governor::clock::Clock;
|
use governor::clock::Clock;
|
||||||
use governor::{Quota, RateLimiter};
|
use governor::{Quota, RateLimiter};
|
||||||
use hex;
|
use hex;
|
||||||
use log::*;
|
|
||||||
use r2d2;
|
use r2d2;
|
||||||
use r2d2_sqlite::SqliteConnectionManager;
|
use r2d2_sqlite::SqliteConnectionManager;
|
||||||
use rusqlite::params;
|
use rusqlite::params;
|
||||||
use rusqlite::types::ToSql;
|
use rusqlite::types::ToSql;
|
||||||
use rusqlite::Connection;
|
|
||||||
use rusqlite::OpenFlags;
|
use rusqlite::OpenFlags;
|
||||||
|
use std::fmt::Write as _;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use std::thread;
|
use std::thread;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
use std::time::Instant;
|
use std::time::Instant;
|
||||||
use tokio::task;
|
use tokio::task;
|
||||||
|
use tracing::{debug, info, trace, warn};
|
||||||
|
|
||||||
pub type SqlitePool = r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>;
|
pub type SqlitePool = r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>;
|
||||||
pub type PooledConnection = r2d2::PooledConnection<r2d2_sqlite::SqliteConnectionManager>;
|
pub type PooledConnection = r2d2::PooledConnection<r2d2_sqlite::SqliteConnectionManager>;
|
||||||
@@ -31,38 +33,58 @@ pub type PooledConnection = r2d2::PooledConnection<r2d2_sqlite::SqliteConnection
|
|||||||
/// Events submitted from a client, with a return channel for notices
|
/// Events submitted from a client, with a return channel for notices
|
||||||
pub struct SubmittedEvent {
|
pub struct SubmittedEvent {
|
||||||
pub event: Event,
|
pub event: Event,
|
||||||
pub notice_tx: tokio::sync::mpsc::Sender<String>,
|
pub notice_tx: tokio::sync::mpsc::Sender<Notice>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Database file
|
/// Database file
|
||||||
pub const DB_FILE: &str = "nostr.db";
|
pub const DB_FILE: &str = "nostr.db";
|
||||||
|
|
||||||
|
/// How frequently to run maintenance
|
||||||
|
/// How many persisted events before DB maintenannce is triggered.
|
||||||
|
pub const EVENT_MAINTENANCE_FREQ_SEC: u64 = 60;
|
||||||
|
|
||||||
|
/// How many persisted events before we pause for backups.
|
||||||
|
/// It isn't clear this is enough to make the online backup API work yet.
|
||||||
|
pub const EVENT_COUNT_BACKUP_PAUSE_TRIGGER: usize = 1000;
|
||||||
|
|
||||||
/// Build a database connection pool.
|
/// Build a database connection pool.
|
||||||
|
/// # Panics
|
||||||
|
///
|
||||||
|
/// Will panic if the pool could not be created.
|
||||||
|
#[must_use]
|
||||||
pub fn build_pool(
|
pub fn build_pool(
|
||||||
name: &str,
|
name: &str,
|
||||||
|
settings: &Settings,
|
||||||
flags: OpenFlags,
|
flags: OpenFlags,
|
||||||
min_size: u32,
|
min_size: u32,
|
||||||
max_size: u32,
|
max_size: u32,
|
||||||
wait_for_db: bool,
|
wait_for_db: bool,
|
||||||
) -> SqlitePool {
|
) -> SqlitePool {
|
||||||
let settings = SETTINGS.read().unwrap();
|
|
||||||
|
|
||||||
let db_dir = &settings.database.data_directory;
|
let db_dir = &settings.database.data_directory;
|
||||||
let full_path = Path::new(db_dir).join(DB_FILE);
|
let full_path = Path::new(db_dir).join(DB_FILE);
|
||||||
// small hack; if the database doesn't exist yet, that means the
|
// small hack; if the database doesn't exist yet, that means the
|
||||||
// writer thread hasn't finished. Give it a chance to work. This
|
// writer thread hasn't finished. Give it a chance to work. This
|
||||||
// is only an issue with the first time we run.
|
// is only an issue with the first time we run.
|
||||||
while !full_path.exists() && wait_for_db {
|
if !settings.database.in_memory {
|
||||||
debug!("Database reader pool is waiting on the database to be created...");
|
while !full_path.exists() && wait_for_db {
|
||||||
thread::sleep(Duration::from_millis(500));
|
debug!("Database reader pool is waiting on the database to be created...");
|
||||||
|
thread::sleep(Duration::from_millis(500));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
let manager = SqliteConnectionManager::file(&full_path)
|
let manager = if settings.database.in_memory {
|
||||||
.with_flags(flags)
|
SqliteConnectionManager::memory()
|
||||||
.with_init(|c| c.execute_batch(STARTUP_SQL));
|
.with_flags(flags)
|
||||||
|
.with_init(|c| c.execute_batch(STARTUP_SQL))
|
||||||
|
} else {
|
||||||
|
SqliteConnectionManager::file(&full_path)
|
||||||
|
.with_flags(flags)
|
||||||
|
.with_init(|c| c.execute_batch(STARTUP_SQL))
|
||||||
|
};
|
||||||
let pool: SqlitePool = r2d2::Pool::builder()
|
let pool: SqlitePool = r2d2::Pool::builder()
|
||||||
.test_on_check_out(true) // no noticeable performance hit
|
.test_on_check_out(true) // no noticeable performance hit
|
||||||
.min_idle(Some(min_size))
|
.min_idle(Some(min_size))
|
||||||
.max_size(max_size)
|
.max_size(max_size)
|
||||||
|
.max_lifetime(Some(Duration::from_secs(30)))
|
||||||
.build(manager)
|
.build(manager)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
info!(
|
info!(
|
||||||
@@ -72,43 +94,76 @@ pub fn build_pool(
|
|||||||
pool
|
pool
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Build a single database connection, with provided flags
|
/// Perform normal maintenance
|
||||||
pub fn build_conn(flags: OpenFlags) -> Result<Connection> {
|
pub fn optimize_db(conn: &mut PooledConnection) -> Result<()> {
|
||||||
let settings = SETTINGS.read().unwrap();
|
let start = Instant::now();
|
||||||
let db_dir = &settings.database.data_directory;
|
conn.execute_batch("PRAGMA optimize;")?;
|
||||||
let full_path = Path::new(db_dir).join(DB_FILE);
|
info!("optimize ran in {:?}", start.elapsed());
|
||||||
// create a connection
|
Ok(())
|
||||||
Ok(Connection::open_with_flags(&full_path, flags)?)
|
}
|
||||||
|
#[derive(Debug)]
|
||||||
|
enum SqliteReturnStatus {
|
||||||
|
SqliteOk,
|
||||||
|
SqliteBusy,
|
||||||
|
SqliteError,
|
||||||
|
SqliteOther(u64),
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Checkpoint/Truncate WAL
|
||||||
|
pub fn checkpoint_db(conn: &mut PooledConnection) -> Result<()> {
|
||||||
|
let query = "PRAGMA wal_checkpoint(TRUNCATE);";
|
||||||
|
let start = Instant::now();
|
||||||
|
let (cp_result, wal_size, _frames_checkpointed) = conn.query_row(query, [], |row| {
|
||||||
|
let checkpoint_result: u64 = row.get(0)?;
|
||||||
|
let wal_size: u64 = row.get(1)?;
|
||||||
|
let frames_checkpointed: u64 = row.get(2)?;
|
||||||
|
Ok((checkpoint_result, wal_size, frames_checkpointed))
|
||||||
|
})?;
|
||||||
|
let result = match cp_result {
|
||||||
|
0 => SqliteReturnStatus::SqliteOk,
|
||||||
|
1 => SqliteReturnStatus::SqliteBusy,
|
||||||
|
2 => SqliteReturnStatus::SqliteError,
|
||||||
|
x => SqliteReturnStatus::SqliteOther(x),
|
||||||
|
};
|
||||||
|
info!(
|
||||||
|
"checkpoint ran in {:?} (result: {:?}, WAL size: {})",
|
||||||
|
start.elapsed(),
|
||||||
|
result,
|
||||||
|
wal_size
|
||||||
|
);
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Spawn a database writer that persists events to the SQLite store.
|
/// Spawn a database writer that persists events to the SQLite store.
|
||||||
pub async fn db_writer(
|
pub async fn db_writer(
|
||||||
|
settings: Settings,
|
||||||
mut event_rx: tokio::sync::mpsc::Receiver<SubmittedEvent>,
|
mut event_rx: tokio::sync::mpsc::Receiver<SubmittedEvent>,
|
||||||
bcast_tx: tokio::sync::broadcast::Sender<Event>,
|
bcast_tx: tokio::sync::broadcast::Sender<Event>,
|
||||||
metadata_tx: tokio::sync::broadcast::Sender<Event>,
|
metadata_tx: tokio::sync::broadcast::Sender<Event>,
|
||||||
mut shutdown: tokio::sync::broadcast::Receiver<()>,
|
mut shutdown: tokio::sync::broadcast::Receiver<()>,
|
||||||
) -> tokio::task::JoinHandle<Result<()>> {
|
) -> tokio::task::JoinHandle<Result<()>> {
|
||||||
let settings = SETTINGS.read().unwrap();
|
|
||||||
|
|
||||||
// are we performing NIP-05 checking?
|
// are we performing NIP-05 checking?
|
||||||
let nip05_active = settings.verified_users.is_active();
|
let nip05_active = settings.verified_users.is_active();
|
||||||
// are we requriing NIP-05 user verification?
|
// are we requriing NIP-05 user verification?
|
||||||
let nip05_enabled = settings.verified_users.is_enabled();
|
let nip05_enabled = settings.verified_users.is_enabled();
|
||||||
|
|
||||||
task::spawn_blocking(move || {
|
task::spawn_blocking(move || {
|
||||||
// get database configuration settings
|
|
||||||
let settings = SETTINGS.read().unwrap();
|
|
||||||
let db_dir = &settings.database.data_directory;
|
let db_dir = &settings.database.data_directory;
|
||||||
let full_path = Path::new(db_dir).join(DB_FILE);
|
let full_path = Path::new(db_dir).join(DB_FILE);
|
||||||
// create a connection pool
|
// create a connection pool
|
||||||
let pool = build_pool(
|
let pool = build_pool(
|
||||||
"event writer",
|
"event writer",
|
||||||
|
&settings,
|
||||||
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
|
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
|
||||||
1,
|
1,
|
||||||
4,
|
2,
|
||||||
false,
|
false,
|
||||||
);
|
);
|
||||||
info!("opened database {:?} for writing", full_path);
|
if settings.database.in_memory {
|
||||||
|
info!("using in-memory database, this will not persist a restart!");
|
||||||
|
} else {
|
||||||
|
info!("opened database {:?} for writing", full_path);
|
||||||
|
}
|
||||||
upgrade_db(&mut pool.get()?)?;
|
upgrade_db(&mut pool.get()?)?;
|
||||||
|
|
||||||
// Make a copy of the whitelist
|
// Make a copy of the whitelist
|
||||||
@@ -118,6 +173,10 @@ pub async fn db_writer(
|
|||||||
let rps_setting = settings.limits.messages_per_sec;
|
let rps_setting = settings.limits.messages_per_sec;
|
||||||
let mut most_recent_rate_limit = Instant::now();
|
let mut most_recent_rate_limit = Instant::now();
|
||||||
let mut lim_opt = None;
|
let mut lim_opt = None;
|
||||||
|
// Constant writing has interfered with online backups. Keep
|
||||||
|
// track of how long since we've given the backups a chance to
|
||||||
|
// run.
|
||||||
|
let mut backup_pause_counter: usize = 0;
|
||||||
let clock = governor::clock::QuantaClock::default();
|
let clock = governor::clock::QuantaClock::default();
|
||||||
if let Some(rps) = rps_setting {
|
if let Some(rps) = rps_setting {
|
||||||
if rps > 0 {
|
if rps > 0 {
|
```diff
@@ -137,12 +196,15 @@ pub async fn db_writer(
             if next_event.is_none() {
                 break;
             }
+            // track if an event write occurred; this is used to
+            // update the rate limiter
             let mut event_write = false;
             let subm_event = next_event.unwrap();
             let event = subm_event.event;
             let notice_tx = subm_event.notice_tx;
             // check if this event is authorized.
             if let Some(allowed_addrs) = whitelist {
+                // TODO: incorporate delegated pubkeys
                 // if the event address is not in allowed_addrs.
                 if !allowed_addrs.contains(&event.pubkey) {
                     info!(
@@ -150,7 +212,10 @@ pub async fn db_writer(
                         event.get_event_id_prefix()
                     );
                     notice_tx
-                        .try_send("pubkey is not allowed to publish to this relay".to_owned())
+                        .try_send(Notice::blocked(
+                            event.id,
+                            "pubkey is not allowed to publish to this relay",
+                        ))
                         .ok();
                     continue;
                 }
```
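The bare-string notices are replaced here by a structured `Notice` carrying the event id, so clients can correlate an acceptance or rejection with the event they submitted, in the style of NIP-20 command results. The `Notice` type itself is defined elsewhere in this changeset; a plausible shape, offered only as a sketch, is:

```rust
// Sketch of the structured notice type used above; the real definition
// is outside this diff, so names and fields here are assumptions.
pub enum Notice {
    Message(String),
    EventResult(EventResult),
}

pub struct EventResult {
    pub id: String,
    pub msg: String,
    pub status: EventResultStatus,
}

pub enum EventResultStatus {
    Saved,
    Duplicate,
    Blocked,
    Error,
}

impl Notice {
    pub fn blocked(id: String, msg: &str) -> Notice {
        Notice::EventResult(EventResult {
            id,
            msg: msg.to_string(),
            status: EventResultStatus::Blocked,
        })
    }
}
```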
```diff
@@ -169,22 +234,23 @@ pub async fn db_writer(
             if nip05_enabled {
                 match nip05::query_latest_user_verification(pool.get()?, event.pubkey.to_owned()) {
                     Ok(uv) => {
-                        if uv.is_valid() {
+                        if uv.is_valid(&settings.verified_users) {
                             info!(
                                 "new event from verified author ({:?},{:?})",
                                 uv.name.to_string(),
                                 event.get_author_prefix()
                             );
                         } else {
-                            info!("rejecting event, author ({:?} / {:?}) verification invalid (expired/wrong domain)",
-                                  uv.name.to_string(),
-                                  event.get_author_prefix()
+                            info!(
+                                "rejecting event, author ({:?} / {:?}) verification invalid (expired/wrong domain)",
+                                uv.name.to_string(),
+                                event.get_author_prefix()
                             );
                             notice_tx
-                                .try_send(
-                                    "NIP-05 verification is no longer valid (expired/wrong domain)"
-                                        .to_owned(),
-                                )
+                                .try_send(Notice::blocked(
+                                    event.id,
+                                    "NIP-05 verification is no longer valid (expired/wrong domain)",
+                                ))
                                 .ok();
                             continue;
                         }
@@ -195,7 +261,10 @@ pub async fn db_writer(
                         event.get_author_prefix()
                     );
                     notice_tx
-                        .try_send("NIP-05 verification needed to publish events".to_owned())
+                        .try_send(Notice::blocked(
+                            event.id,
+                            "NIP-05 verification needed to publish events",
+                        ))
                        .ok();
                    continue;
                }
@@ -207,30 +276,46 @@ pub async fn db_writer(
             }
             // TODO: cache recent list of authors to remove a DB call.
             let start = Instant::now();
-            match write_event(&mut pool.get()?, &event) {
-                Ok(updated) => {
-                    if updated == 0 {
-                        trace!("ignoring duplicate event");
-                    } else {
-                        info!(
-                            "persisted event {:?} from {:?} in {:?}",
-                            event.get_event_id_prefix(),
-                            event.get_author_prefix(),
-                            start.elapsed()
-                        );
-                        event_write = true;
-                        // send this out to all clients
-                        bcast_tx.send(event.clone()).ok();
+            if event.kind >= 20000 && event.kind < 30000 {
+                bcast_tx.send(event.clone()).ok();
+                info!(
+                    "published ephemeral event: {:?} from: {:?} in: {:?}",
+                    event.get_event_id_prefix(),
+                    event.get_author_prefix(),
+                    start.elapsed()
+                );
+                event_write = true
+            } else {
+                log_pool_stats("writer", &pool);
+                match write_event(&mut pool.get()?, &event) {
+                    Ok(updated) => {
+                        if updated == 0 {
+                            trace!("ignoring duplicate or deleted event");
+                            notice_tx.try_send(Notice::duplicate(event.id)).ok();
+                        } else {
+                            info!(
+                                "persisted event: {:?} from: {:?} in: {:?}",
+                                event.get_event_id_prefix(),
+                                event.get_author_prefix(),
+                                start.elapsed()
+                            );
+                            event_write = true;
+                            // send this out to all clients
+                            bcast_tx.send(event.clone()).ok();
+                            notice_tx.try_send(Notice::saved(event.id)).ok();
+                        }
+                    }
+                    Err(err) => {
+                        warn!("event insert failed: {:?}", err);
+                        let msg = "relay experienced an error trying to publish the latest event";
+                        notice_tx.try_send(Notice::error(event.id, msg)).ok();
                     }
                 }
-                Err(err) => {
-                    warn!("event insert failed: {:?}", err);
-                    notice_tx
-                        .try_send(
-                            "relay experienced an error trying to publish the latest event"
-                                .to_owned(),
-                        )
-                        .ok();
-                }
+            }
+            backup_pause_counter += 1;
+            if backup_pause_counter > EVENT_COUNT_BACKUP_PAUSE_TRIGGER {
+                info!("pausing db write thread for a moment...");
+                thread::sleep(Duration::from_millis(500));
+                backup_pause_counter = 0
             }
 
```
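The hard-coded ranges above follow NIP-16: kinds 20000 through 29999 are ephemeral (broadcast to subscribers but never written to the database), while 10000 through 19999, plus the classic kinds 0 and 3, are replaceable. Factored out for clarity (the relay inlines these comparisons):

```rust
/// Kind classification matching the ranges used by db_writer and
/// write_event above; a readability sketch, not relay code.
fn is_ephemeral(kind: u64) -> bool {
    (20000..30000).contains(&kind)
}

fn is_replaceable(kind: u64) -> bool {
    kind == 0 || kind == 3 || (10000..20000).contains(&kind)
}
```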
```diff
@@ -268,16 +353,18 @@ pub fn write_event(conn: &mut PooledConnection, e: &Event) -> Result<usize> {
     let tx = conn.transaction()?;
     // get relevant fields from event and convert to blobs.
     let id_blob = hex::decode(&e.id).ok();
-    let pubkey_blob = hex::decode(&e.pubkey).ok();
+    let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok();
+    let delegator_blob: Option<Vec<u8>> = e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
     let event_str = serde_json::to_string(&e).ok();
     // ignore if the event hash is a duplicate.
-    let ins_count = tx.execute(
-        "INSERT OR IGNORE INTO event (event_hash, created_at, kind, author, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, strftime('%s','now'), FALSE);",
-        params![id_blob, e.created_at, e.kind, pubkey_blob, event_str]
+    let mut ins_count = tx.execute(
+        "INSERT OR IGNORE INTO event (event_hash, created_at, kind, author, delegated_by, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, ?6, strftime('%s','now'), FALSE);",
+        params![id_blob, e.created_at, e.kind, pubkey_blob, delegator_blob, event_str]
     )?;
     if ins_count == 0 {
         // if the event was a duplicate, no need to insert event or
         // pubkey references.
+        tx.rollback().ok();
         return Ok(ins_count);
     }
     // remember primary key of the event most recently inserted.
@@ -288,48 +375,86 @@ pub fn write_event(conn: &mut PooledConnection, e: &Event) -> Result<usize> {
         if tag.len() >= 2 {
             let tagname = &tag[0];
             let tagval = &tag[1];
-            // if tagvalue is hex;
-            if is_hex(tagval) {
-                tx.execute(
-                    "INSERT OR IGNORE INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3)",
-                    params![ev_id, &tagname, hex::decode(&tagval).ok()],
-                )?;
-            } else {
-                tx.execute(
-                    "INSERT OR IGNORE INTO tag (event_id, name, value) VALUES (?1, ?2, ?3)",
-                    params![ev_id, &tagname, &tagval],
-                )?;
+            // only single-char tags are searchable
+            let tagchar_opt = single_char_tagname(tagname);
+            match &tagchar_opt {
+                Some(_) => {
+                    // if tagvalue is lowercase hex;
+                    if is_lower_hex(tagval) && (tagval.len() % 2 == 0) {
+                        tx.execute(
+                            "INSERT OR IGNORE INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3)",
+                            params![ev_id, &tagname, hex::decode(tagval).ok()],
+                        )?;
+                    } else {
+                        tx.execute(
+                            "INSERT OR IGNORE INTO tag (event_id, name, value) VALUES (?1, ?2, ?3)",
+                            params![ev_id, &tagname, &tagval],
+                        )?;
+                    }
+                }
+                None => {}
             }
         }
     }
-    // if this event is for a metadata update, hide every other kind=0
-    // event from the same author that was issued earlier than this.
-    if e.kind == 0 {
+    // if this event is replaceable update, hide every other replaceable
+    // event with the same kind from the same author that was issued
+    // earlier than this.
+    if e.kind == 0 || e.kind == 3 || (e.kind >= 10000 && e.kind < 20000) {
         let update_count = tx.execute(
-            "UPDATE event SET hidden=TRUE WHERE id!=? AND kind=0 AND author=? AND created_at <= ? and hidden!=TRUE",
-            params![ev_id, hex::decode(&e.pubkey).ok(), e.created_at],
+            "UPDATE event SET hidden=TRUE WHERE id!=? AND kind=? AND author=? AND created_at <= ? and hidden!=TRUE",
+            params![ev_id, e.kind, hex::decode(&e.pubkey).ok(), e.created_at],
         )?;
         if update_count > 0 {
             info!(
-                "hid {} older metadata events for author {:?}",
+                "hid {} older replaceable kind {} events for author: {:?}",
                 update_count,
+                e.kind,
                 e.get_author_prefix()
             );
         }
     }
-    // if this event is for a contact update, hide every other kind=3
-    // event from the same author that was issued earlier than this.
-    if e.kind == 3 {
-        let update_count = tx.execute(
-            "UPDATE event SET hidden=TRUE WHERE id!=? AND kind=3 AND author=? AND created_at <= ? and hidden!=TRUE",
-            params![ev_id, hex::decode(&e.pubkey).ok(), e.created_at],
-        )?;
-        if update_count > 0 {
-            info!(
-                "hid {} older contact events for author {:?}",
-                update_count,
-                e.get_author_prefix()
-            );
+    // if this event is a deletion, hide the referenced events from the same author.
+    if e.kind == 5 {
+        let event_candidates = e.tag_values_by_name("e");
+        // first parameter will be author
+        let mut params: Vec<Box<dyn ToSql>> = vec![Box::new(hex::decode(&e.pubkey)?)];
+        event_candidates
+            .iter()
+            .filter(|x| is_hex(x) && x.len() == 64)
+            .filter_map(|x| hex::decode(x).ok())
+            .for_each(|x| params.push(Box::new(x)));
+        let query = format!(
+            "UPDATE event SET hidden=TRUE WHERE kind!=5 AND author=? AND event_hash IN ({})",
+            repeat_vars(params.len() - 1)
+        );
+        let mut stmt = tx.prepare(&query)?;
+        let update_count = stmt.execute(rusqlite::params_from_iter(params))?;
+        info!(
+            "hid {} deleted events for author {:?}",
+            update_count,
+            e.get_author_prefix()
+        );
+    } else {
+        // check if a deletion has already been recorded for this event.
+        // Only relevant for non-deletion events
+        let del_count = tx.query_row(
+            "SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.author=? AND t.name='e' AND e.kind=5 AND t.value_hex=? LIMIT 1;",
+            params![pubkey_blob, id_blob], |row| row.get::<usize, usize>(0));
+        // check if a the query returned a result, meaning we should
+        // hid the current event
+        if del_count.ok().is_some() {
+            // a deletion already existed, mark original event as hidden.
+            info!(
+                "hid event: {:?} due to existing deletion by author: {:?}",
+                e.get_event_id_prefix(),
+                e.get_author_prefix()
+            );
+            let _update_count =
+                tx.execute("UPDATE event SET hidden=TRUE WHERE id=?", params![ev_id])?;
+            // event was deleted, so let caller know nothing new
+            // arrived, preventing this from being sent to active
+            // subscriptions
+            ins_count = 0;
         }
     }
     tx.commit()?;
```
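The deletion path builds its `IN (...)` list with `repeat_vars`, whose body is elided from this diff; it simply emits `count` comma-separated `?` placeholders. A sketch consistent with its use here:

```rust
// Sketch of repeat_vars (the actual body is not shown in this hunk):
// produce "?,?,...,?" with `count` placeholders for an IN clause.
fn repeat_vars(count: usize) -> String {
    if count == 0 {
        return "".to_owned();
    }
    let mut s = "?,".repeat(count);
    // remove the trailing comma
    s.pop();
    s
}
```

So a kind-5 event referencing two well-formed event ids runs `UPDATE event SET hidden=TRUE WHERE kind!=5 AND author=? AND event_hash IN (?,?)`, with the author blob bound first.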
```diff
@@ -337,7 +462,7 @@ pub fn write_event(conn: &mut PooledConnection, e: &Event) -> Result<usize> {
 }
 
 /// Serialized event associated with a specific subscription request.
-#[derive(PartialEq, Debug, Clone)]
+#[derive(PartialEq, Eq, Debug, Clone)]
 pub struct QueryResult {
     /// Subscription identifier
     pub sub_id: String,
@@ -356,145 +481,229 @@ fn repeat_vars(count: usize) -> String {
     s
 }
 
-/// Create a dynamic SQL query string and params from a subscription.
-fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
+/// Create a dynamic SQL subquery and params from a subscription filter.
+fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>) {
     // build a dynamic SQL query. all user-input is either an integer
     // (sqli-safe), or a string that is filtered to only contain
     // hexadecimal characters. Strings that require escaping (tag
     // names/values) use parameters.
-    let mut query =
-        "SELECT DISTINCT(e.content) FROM event e LEFT JOIN tag t ON e.id=t.event_id ".to_owned();
-    // parameters
+
+    // if the filter is malformed, don't return anything.
+    if f.force_no_match {
+        let empty_query = "SELECT e.content, e.created_at FROM event e WHERE 1=0".to_owned();
+        // query parameters for SQLite
+        let empty_params: Vec<Box<dyn ToSql>> = vec![];
+        return (empty_query, empty_params);
+    }
+
+    let mut query = "SELECT e.content, e.created_at FROM event e".to_owned();
+    // query parameters for SQLite
     let mut params: Vec<Box<dyn ToSql>> = vec![];
 
-    // for every filter in the subscription, generate a where clause
-    let mut filter_clauses: Vec<String> = Vec::new();
-    for f in sub.filters.iter() {
-        // individual filter components
-        let mut filter_components: Vec<String> = Vec::new();
-        // Query for "authors", allowing prefix matches
-        if let Some(authvec) = &f.authors {
-            // take each author and convert to a hexsearch
-            let mut auth_searches: Vec<String> = vec![];
-            for auth in authvec {
-                match hex_range(auth) {
-                    Some(HexSearch::Exact(ex)) => {
-                        auth_searches.push("author=?".to_owned());
-                        params.push(Box::new(ex));
-                    }
-                    Some(HexSearch::Range(lower, upper)) => {
-                        auth_searches.push("(author>? AND author<?)".to_owned());
-                        params.push(Box::new(lower));
-                        params.push(Box::new(upper));
-                    }
-                    Some(HexSearch::LowerOnly(lower)) => {
-                        auth_searches.push("author>?".to_owned());
-                        params.push(Box::new(lower));
-                    }
-                    None => {
-                        info!("Could not parse hex range from author {:?}", auth);
+    // individual filter components (single conditions such as an author or event ID)
+    let mut filter_components: Vec<String> = Vec::new();
+    // Query for "authors", allowing prefix matches
+    if let Some(authvec) = &f.authors {
+        // take each author and convert to a hexsearch
+        let mut auth_searches: Vec<String> = vec![];
+        for auth in authvec {
+            match hex_range(auth) {
+                Some(HexSearch::Exact(ex)) => {
+                    auth_searches.push("author=? OR delegated_by=?".to_owned());
+                    params.push(Box::new(ex.clone()));
+                    params.push(Box::new(ex));
+                }
+                Some(HexSearch::Range(lower, upper)) => {
+                    auth_searches.push(
+                        "(author>? AND author<?) OR (delegated_by>? AND delegated_by<?)".to_owned(),
+                    );
+                    params.push(Box::new(lower.clone()));
+                    params.push(Box::new(upper.clone()));
+                    params.push(Box::new(lower));
+                    params.push(Box::new(upper));
+                }
+                Some(HexSearch::LowerOnly(lower)) => {
+                    auth_searches.push("author>? OR delegated_by>?".to_owned());
+                    params.push(Box::new(lower.clone()));
+                    params.push(Box::new(lower));
+                }
+                None => {
+                    info!("Could not parse hex range from author {:?}", auth);
                 }
             }
+        }
+        if !authvec.is_empty() {
             let authors_clause = format!("({})", auth_searches.join(" OR "));
             filter_components.push(authors_clause);
+        } else {
+            // if the authors list was empty, we should never return
+            // any results.
+            filter_components.push("false".to_owned());
         }
-        // Query for Kind
-        if let Some(ks) = &f.kinds {
-            // kind is number, no escaping needed
-            let str_kinds: Vec<String> = ks.iter().map(|x| x.to_string()).collect();
-            let kind_clause = format!("kind IN ({})", str_kinds.join(", "));
-            filter_components.push(kind_clause);
-        }
-        // Query for event, allowing prefix matches
-        if let Some(idvec) = &f.ids {
-            // take each author and convert to a hexsearch
-            let mut id_searches: Vec<String> = vec![];
-            for id in idvec {
-                match hex_range(id) {
-                    Some(HexSearch::Exact(ex)) => {
-                        id_searches.push("event_hash=?".to_owned());
-                        params.push(Box::new(ex));
-                    }
-                    Some(HexSearch::Range(lower, upper)) => {
-                        id_searches.push("(event_hash>? AND event_hash<?)".to_owned());
-                        params.push(Box::new(lower));
-                        params.push(Box::new(upper));
-                    }
-                    Some(HexSearch::LowerOnly(lower)) => {
-                        id_searches.push("event_hash>?".to_owned());
-                        params.push(Box::new(lower));
-                    }
-                    None => {
-                        info!("Could not parse hex range from id {:?}", id);
-                    }
+    }
+    // Query for Kind
+    if let Some(ks) = &f.kinds {
+        // kind is number, no escaping needed
+        let str_kinds: Vec<String> = ks.iter().map(|x| x.to_string()).collect();
+        let kind_clause = format!("kind IN ({})", str_kinds.join(", "));
+        filter_components.push(kind_clause);
+    }
+    // Query for event, allowing prefix matches
+    if let Some(idvec) = &f.ids {
+        // take each author and convert to a hexsearch
+        let mut id_searches: Vec<String> = vec![];
+        for id in idvec {
+            match hex_range(id) {
+                Some(HexSearch::Exact(ex)) => {
+                    id_searches.push("event_hash=?".to_owned());
+                    params.push(Box::new(ex));
+                }
+                Some(HexSearch::Range(lower, upper)) => {
+                    id_searches.push("(event_hash>? AND event_hash<?)".to_owned());
+                    params.push(Box::new(lower));
+                    params.push(Box::new(upper));
+                }
+                Some(HexSearch::LowerOnly(lower)) => {
+                    id_searches.push("event_hash>?".to_owned());
+                    params.push(Box::new(lower));
+                }
+                None => {
+                    info!("Could not parse hex range from id {:?}", id);
                 }
             }
+        }
+        if !idvec.is_empty() {
             let id_clause = format!("({})", id_searches.join(" OR "));
             filter_components.push(id_clause);
+        } else {
+            // if the ids list was empty, we should never return
+            // any results.
+            filter_components.push("false".to_owned());
         }
-        // Query for tags
-        if let Some(map) = &f.tags {
-            for (key, val) in map.iter() {
-                let mut str_vals: Vec<Box<dyn ToSql>> = vec![];
-                let mut blob_vals: Vec<Box<dyn ToSql>> = vec![];
-                for v in val {
-                    if is_hex(v) {
-                        if let Ok(h) = hex::decode(&v) {
-                            blob_vals.push(Box::new(h));
-                        }
-                    } else {
-                        str_vals.push(Box::new(v.to_owned()));
-                    }
+    }
+    // Query for tags
+    if let Some(map) = &f.tags {
+        for (key, val) in map.iter() {
+            let mut str_vals: Vec<Box<dyn ToSql>> = vec![];
+            let mut blob_vals: Vec<Box<dyn ToSql>> = vec![];
+            for v in val {
+                if (v.len() % 2 == 0) && is_lower_hex(v) {
+                    if let Ok(h) = hex::decode(v) {
+                        blob_vals.push(Box::new(h));
+                    }
+                } else {
+                    str_vals.push(Box::new(v.to_owned()));
                 }
-                // create clauses with "?" params for each tag value being searched
-                let str_clause = format!("value IN ({})", repeat_vars(str_vals.len()));
-                let blob_clause = format!("value_hex IN ({})", repeat_vars(blob_vals.len()));
-                let tag_clause = format!("(name=? AND ({} OR {}))", str_clause, blob_clause);
-                // add the tag name as the first parameter
-                params.push(Box::new(key.to_owned()));
-                // add all tag values that are plain strings as params
-                params.append(&mut str_vals);
-                // add all tag values that are blobs as params
-                params.append(&mut blob_vals);
-                filter_components.push(tag_clause);
             }
+            // create clauses with "?" params for each tag value being searched
+            let str_clause = format!("value IN ({})", repeat_vars(str_vals.len()));
+            let blob_clause = format!("value_hex IN ({})", repeat_vars(blob_vals.len()));
+            // find evidence of the target tag name/value existing for this event.
+            let tag_clause = format!(
+                "e.id IN (SELECT e.id FROM event e LEFT JOIN tag t on e.id=t.event_id WHERE hidden!=TRUE and (name=? AND ({} OR {})))",
+                str_clause, blob_clause
+            );
+            // add the tag name as the first parameter
+            params.push(Box::new(key.to_string()));
+            // add all tag values that are plain strings as params
+            params.append(&mut str_vals);
+            // add all tag values that are blobs as params
+            params.append(&mut blob_vals);
+            filter_components.push(tag_clause);
         }
-        // Query for timestamp
-        if f.since.is_some() {
-            let created_clause = format!("created_at > {}", f.since.unwrap());
-            filter_components.push(created_clause);
-        }
-        // Query for timestamp
-        if f.until.is_some() {
-            let until_clause = format!("created_at < {}", f.until.unwrap());
-            filter_components.push(until_clause);
-        }
-
-        // combine all clauses, and add to filter_clauses
-        if !filter_components.is_empty() {
-            let mut fc = "( ".to_owned();
-            fc.push_str(&filter_components.join(" AND "));
-            fc.push_str(" )");
-            filter_clauses.push(fc);
-        }
     }
+    // Query for timestamp
+    if f.since.is_some() {
+        let created_clause = format!("created_at > {}", f.since.unwrap());
+        filter_components.push(created_clause);
+    }
+    // Query for timestamp
+    if f.until.is_some() {
+        let until_clause = format!("created_at < {}", f.until.unwrap());
+        filter_components.push(until_clause);
+    }
     // never display hidden events
-    query.push_str(" WHERE hidden!=TRUE ");
-    // combine all filters with OR clauses, if any exist
-    if !filter_clauses.is_empty() {
-        query.push_str(" AND (");
-        query.push_str(&filter_clauses.join(" OR "));
-        query.push_str(") ");
+    query.push_str(" WHERE hidden!=TRUE");
+    // build filter component conditions
+    if !filter_components.is_empty() {
+        query.push_str(" AND ");
+        query.push_str(&filter_components.join(" AND "));
+    }
+    // Apply per-filter limit to this subquery.
+    // The use of a LIMIT implies a DESC order, to capture only the most recent events.
+    if let Some(lim) = f.limit {
+        let _ = write!(query, " ORDER BY e.created_at DESC LIMIT {}", lim);
+    } else {
+        query.push_str(" ORDER BY e.created_at ASC")
     }
-    // add order clause
-    query.push_str(" ORDER BY created_at ASC");
-    debug!("query string: {}", query);
     (query, params)
 }
```
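To make the clause assembly concrete, consider a filter with one 8-character author prefix, a single kind, and `since` and `limit` set. `hex_range` turns the prefix into a `HexSearch::Range`, and the subquery comes out roughly as below; the four `?` placeholders are bound to the lower/upper range blobs for both `author` and `delegated_by`. This is illustrative output, not copied from the relay:

```rust
// Approximate SQL produced by query_from_filter for a filter like
// {"authors":["deadbeef"], "kinds":[1], "since":1660000000, "limit":10}
fn example_filter_sql() -> &'static str {
    "SELECT e.content, e.created_at FROM event e \
     WHERE hidden!=TRUE \
     AND ((author>? AND author<?) OR (delegated_by>? AND delegated_by<?)) \
     AND kind IN (1) \
     AND created_at > 1660000000 \
     ORDER BY e.created_at DESC LIMIT 10"
}
```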
```diff
 
+/// Create a dynamic SQL query string and params from a subscription.
+fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
+    // build a dynamic SQL query for an entire subscription, based on
+    // SQL subqueries for filters.
+    let mut subqueries: Vec<String> = Vec::new();
+    // subquery params
+    let mut params: Vec<Box<dyn ToSql>> = vec![];
+    // for every filter in the subscription, generate a subquery
+    for f in sub.filters.iter() {
+        let (f_subquery, mut f_params) = query_from_filter(f);
+        subqueries.push(f_subquery);
+        params.append(&mut f_params);
+    }
+    // encapsulate subqueries into select statements
+    let subqueries_selects: Vec<String> = subqueries
+        .iter()
+        .map(|s| format!("SELECT distinct content, created_at FROM ({})", s))
+        .collect();
+    let query: String = subqueries_selects.join(" UNION ");
+    (query, params)
+}
+
+/// Check if the pool is fully utilized
+fn _pool_at_capacity(pool: &SqlitePool) -> bool {
+    let state: r2d2::State = pool.state();
+    state.idle_connections == 0
+}
+
+/// Log pool stats
+fn log_pool_stats(name: &str, pool: &SqlitePool) {
+    let state: r2d2::State = pool.state();
+    let in_use_cxns = state.connections - state.idle_connections;
+    trace!(
+        "DB pool {:?} usage (in_use: {}, available: {})",
+        name,
+        in_use_cxns,
+        state.connections
+    );
+    if state.connections == in_use_cxns {
+        debug!("DB pool {:?} is empty (in_use: {})", name, in_use_cxns);
+    }
+}
+
+/// Perform database maintenance on a regular basis
+pub async fn db_maintenance(pool: SqlitePool) {
+    tokio::task::spawn(async move {
+        loop {
+            tokio::select! {
+                _ = tokio::time::sleep(Duration::from_secs(EVENT_MAINTENANCE_FREQ_SEC)) => {
+                    if let Ok(mut conn) = pool.get() {
+                        // the busy timer will block writers, so don't set
+                        // this any higher than you want max latency for event
+                        // writes.
+                        conn.busy_timeout(Duration::from_secs(1)).ok();
+                        debug!("running database optimizer");
+                        optimize_db(&mut conn).ok();
+                        debug!("running wal_checkpoint(TRUNCATE)");
+                        checkpoint_db(&mut conn).ok();
+                    }
+                }
+            };
+        }
+    });
+}
 
 /// Perform a database query using a subscription.
 ///
 /// The [`Subscription`] is converted into a SQL query. Each result
```
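`optimize_db` and `checkpoint_db` are not shown in this diff; judging from the log messages they wrap SQLite pragmas, roughly as below. This is a sketch under that assumption, not the relay's exact code:

```rust
use rusqlite::Connection;

// Assumed implementations matching the "database optimizer" and
// "wal_checkpoint(TRUNCATE)" log lines above.
fn optimize_db(conn: &mut Connection) -> rusqlite::Result<()> {
    // refresh query-planner statistics
    conn.execute_batch("PRAGMA optimize;")
}

fn checkpoint_db(conn: &mut Connection) -> rusqlite::Result<()> {
    // fold the write-ahead log back into the main DB file and truncate it
    conn.execute_batch("PRAGMA wal_checkpoint(TRUNCATE);")
}
```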
```diff
@@ -503,39 +712,139 @@ fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
 /// query is immediately aborted.
 pub async fn db_query(
     sub: Subscription,
-    conn: PooledConnection,
+    client_id: String,
+    pool: SqlitePool,
     query_tx: tokio::sync::mpsc::Sender<QueryResult>,
     mut abandon_query_rx: tokio::sync::oneshot::Receiver<()>,
 ) {
+    let pre_spawn_start = Instant::now();
     task::spawn_blocking(move || {
-        debug!("going to query for: {:?}", sub);
-        let mut row_count: usize = 0;
+        let db_queue_time = pre_spawn_start.elapsed();
+        // if the queue time was very long (>5 seconds), spare the DB and abort.
+        if db_queue_time > Duration::from_secs(5) {
+            info!(
+                "shedding DB query load from {:?} (cid: {}, sub: {:?})",
+                db_queue_time, client_id, sub.id
+            );
+            return Ok(());
+        }
+        // otherwise, report queuing time if it is slow
+        else if db_queue_time > Duration::from_secs(1) {
+            debug!(
+                "(slow) DB query queued for {:?} (cid: {}, sub: {:?})",
+                db_queue_time, client_id, sub.id
+            );
+        }
         let start = Instant::now();
+        let mut row_count: usize = 0;
         // generate SQL query
         let (q, p) = query_from_sub(&sub);
-        // execute the query. Don't cache, since queries vary so much.
-        let mut stmt = conn.prepare(&q)?;
-        let mut event_rows = stmt.query(rusqlite::params_from_iter(p))?;
-        while let Some(row) = event_rows.next()? {
-            // check if this is still active (we could do this every N rows)
-            if abandon_query_rx.try_recv().is_ok() {
-                debug!("query aborted");
-                return Ok(());
+        let sql_gen_elapsed = start.elapsed();
+        if sql_gen_elapsed > Duration::from_millis(10) {
+            debug!("SQL (slow) generated in {:?}", start.elapsed());
+        }
+        // show pool stats
+        log_pool_stats("reader", &pool);
+        // cutoff for displaying slow queries
+        let slow_cutoff = Duration::from_millis(2000);
+        // any client that doesn't cause us to generate new rows in 5
+        // seconds gets dropped.
+        let abort_cutoff = Duration::from_secs(5);
+        let start = Instant::now();
+        let mut slow_first_event;
+        let mut last_successful_send = Instant::now();
+        if let Ok(conn) = pool.get() {
+            // execute the query. Don't cache, since queries vary so much.
+            let mut stmt = conn.prepare(&q)?;
+            let mut event_rows = stmt.query(rusqlite::params_from_iter(p))?;
+            let mut first_result = true;
+            while let Some(row) = event_rows.next()? {
+                let first_event_elapsed = start.elapsed();
+                slow_first_event = first_event_elapsed >= slow_cutoff;
+                if first_result {
+                    debug!(
+                        "first result in {:?} (cid: {}, sub: {:?})",
+                        first_event_elapsed, client_id, sub.id
+                    );
+                    first_result = false;
+                }
+                // logging for slow queries; show sub and SQL.
+                // to reduce logging; only show 1/16th of clients (leading 0)
+                if row_count == 0 && slow_first_event && client_id.starts_with("0") {
+                    debug!(
+                        "query req (slow): {:?} (cid: {}, sub: {:?})",
+                        sub, client_id, sub.id
+                    );
+                    debug!(
+                        "query string (slow): {} (cid: {}, sub: {:?})",
+                        q, client_id, sub.id
+                    );
+                } else {
+                    trace!(
+                        "query req: {:?} (cid: {}, sub: {:?})",
+                        sub,
+                        client_id,
+                        sub.id
+                    );
+                    trace!(
+                        "query string: {} (cid: {}, sub: {:?})",
+                        q,
+                        client_id,
+                        sub.id
+                    );
+                }
+                // check if this is still active; every 100 rows
+                if row_count % 100 == 0 && abandon_query_rx.try_recv().is_ok() {
+                    debug!("query aborted (cid: {}, sub: {:?})", client_id, sub.id);
+                    return Ok(());
+                }
+                row_count += 1;
+                let event_json = row.get(0)?;
+                loop {
+                    if query_tx.capacity() != 0 {
+                        // we have capacity to add another item
+                        break;
+                    } else {
+                        // the queue is full
+                        trace!("db reader thread is stalled");
+                        if last_successful_send + abort_cutoff < Instant::now() {
+                            // the queue has been full for too long, abort
+                            info!("aborting database query due to slow client");
+                            let ok: Result<()> = Ok(());
+                            return ok;
+                        }
+                        // give the queue a chance to clear before trying again
+                        thread::sleep(Duration::from_millis(100));
+                    }
+                }
+                // TODO: we could use try_send, but we'd have to juggle
+                // getting the query result back as part of the error
+                // result.
+                query_tx
+                    .blocking_send(QueryResult {
+                        sub_id: sub.get_id(),
+                        event: event_json,
+                    })
+                    .ok();
+                last_successful_send = Instant::now();
             }
-            row_count += 1;
-            let event_json = row.get(0)?;
             query_tx
                 .blocking_send(QueryResult {
                     sub_id: sub.get_id(),
-                    event: event_json,
+                    event: "EOSE".to_string(),
                 })
                 .ok();
+            debug!(
+                "query completed in {:?} (cid: {}, sub: {:?}, db_time: {:?}, rows: {})",
+                pre_spawn_start.elapsed(),
+                client_id,
+                sub.id,
+                start.elapsed(),
+                row_count
+            );
+        } else {
+            warn!("Could not get a database connection for querying");
         }
-        debug!(
-            "query completed ({} rows) in {:?}",
-            row_count,
-            start.elapsed()
-        );
         let ok: Result<()> = Ok(());
         ok
     });
```
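Note the sentinel at the end: once the row loop finishes, the task pushes a `QueryResult` whose `event` field is the literal string `"EOSE"`. Downstream, the socket handler can turn that into a NIP-15 end-of-stored-events frame instead of an event frame. A sketch of that dispatch, with illustrative names:

```rust
// Map a QueryResult onto the wire frames a client sees for its
// subscription (sketch; not the relay's actual handler).
fn frame_for(qr: &QueryResult) -> String {
    if qr.event == "EOSE" {
        // ["EOSE", <sub_id>] signals that no more stored events follow
        format!(r#"["EOSE","{}"]"#, qr.sub_id)
    } else {
        // qr.event already holds serialized event JSON
        format!(r#"["EVENT","{}",{}]"#, qr.sub_id, qr.event)
    }
}
```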
src/delegation.rs (new file, 416 lines)

```rust
//! Event parsing and validation
use crate::error::Error;
use crate::error::Result;
use crate::event::Event;
use bitcoin_hashes::{sha256, Hash};
use lazy_static::lazy_static;
use regex::Regex;
use secp256k1::{schnorr, Secp256k1, VerifyOnly, XOnlyPublicKey};
use serde::{Deserialize, Serialize};
use std::str::FromStr;
use tracing::{debug, info};

// This handles everything related to delegation, in particular the
// condition/rune parsing and logic.

// Conditions are poorly specified, so we will implement the minimum
// necessary for now.

// fields MUST be either "kind" or "created_at".
// operators supported are ">", "<", "=", "!".
// no operations on 'content' are supported.

// this allows constraints for:
// valid date ranges (valid from X->Y dates).
// specific kinds (publish kind=1,5)
// kind ranges (publish ephemeral events, kind>19999&kind<30001)

// for more complex scenarios (allow delegatee to publish ephemeral
// AND replacement events), it may be necessary to generate and use
// different condition strings, since we do not support grouping or
// "OR" logic.

lazy_static! {
    /// Secp256k1 verification instance.
    pub static ref SECP: Secp256k1<VerifyOnly> = Secp256k1::verification_only();
}

#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub enum Field {
    Kind,
    CreatedAt,
}

impl FromStr for Field {
    type Err = Error;
    fn from_str(value: &str) -> Result<Self, Self::Err> {
        if value == "kind" {
            Ok(Field::Kind)
        } else if value == "created_at" {
            Ok(Field::CreatedAt)
        } else {
            Err(Error::DelegationParseError)
        }
    }
}

#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub enum Operator {
    LessThan,
    GreaterThan,
    Equals,
    NotEquals,
}
impl FromStr for Operator {
    type Err = Error;
    fn from_str(value: &str) -> Result<Self, Self::Err> {
        if value == "<" {
            Ok(Operator::LessThan)
        } else if value == ">" {
            Ok(Operator::GreaterThan)
        } else if value == "=" {
            Ok(Operator::Equals)
        } else if value == "!" {
            Ok(Operator::NotEquals)
        } else {
            Err(Error::DelegationParseError)
        }
    }
}

#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct ConditionQuery {
    pub(crate) conditions: Vec<Condition>,
}

impl ConditionQuery {
    pub fn allows_event(&self, event: &Event) -> bool {
        // check each condition, to ensure that the event complies
        // with the restriction.
        for c in &self.conditions {
            if !c.allows_event(event) {
                // any failing conditions invalidates the delegation
                // on this event
                return false;
            }
        }
        // delegation was permitted unconditionally, or all conditions
        // were true
        true
    }
}

// Verify that the delegator approved the delegation; return a ConditionQuery if so.
pub fn validate_delegation(
    delegator: &str,
    delegatee: &str,
    cond_query: &str,
    sigstr: &str,
) -> Option<ConditionQuery> {
    // form the token
    let tok = format!("nostr:delegation:{}:{}", delegatee, cond_query);
    // form SHA256 hash
    let digest: sha256::Hash = sha256::Hash::hash(tok.as_bytes());
    let sig = schnorr::Signature::from_str(sigstr).unwrap();
    if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) {
        if let Ok(pubkey) = XOnlyPublicKey::from_str(delegator) {
            let verify = SECP.verify_schnorr(&sig, &msg, &pubkey);
            if verify.is_ok() {
                // return the parsed condition query
                cond_query.parse::<ConditionQuery>().ok()
            } else {
                debug!("client sent an delegation signature that did not validate");
                None
            }
        } else {
            debug!("client sent malformed delegation pubkey");
            None
        }
    } else {
        info!("error converting delegation digest to secp256k1 message");
        None
    }
}
```
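`validate_delegation` checks a Schnorr signature over the token `nostr:delegation:<delegatee>:<conditions>`. The delegator-side construction (not part of the relay) is the mirror image; a sketch using the same crates, assuming a secp256k1 version that exposes `KeyPair` and `sign_schnorr`:

```rust
use bitcoin_hashes::{sha256, Hash};
use secp256k1::{KeyPair, Message, Secp256k1};

// Delegator-side sketch (not relay code): sign the NIP-26 token that
// validate_delegation() above will verify. Inputs here are illustrative.
fn sign_delegation(delegator_sk_hex: &str, delegatee_pk_hex: &str, conditions: &str) -> String {
    let secp = Secp256k1::new();
    let keypair = KeyPair::from_seckey_str(&secp, delegator_sk_hex).expect("bad seckey");
    // the exact token format checked by the relay
    let token = format!("nostr:delegation:{}:{}", delegatee_pk_hex, conditions);
    let digest = sha256::Hash::hash(token.as_bytes());
    let msg = Message::from_slice(digest.as_ref()).expect("32 bytes");
    secp.sign_schnorr(&msg, &keypair).to_string()
}
```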
```rust
// src/delegation.rs (continued)

/// Parsed delegation condition
/// see https://github.com/nostr-protocol/nips/pull/28#pullrequestreview-1084903800
/// An example complex condition would be: kind=1,2,3&created_at<1665265999
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct Condition {
    pub(crate) field: Field,
    pub(crate) operator: Operator,
    pub(crate) values: Vec<u64>,
}

impl Condition {
    /// Check if this condition allows the given event to be delegated
    pub fn allows_event(&self, event: &Event) -> bool {
        // determine what the right-hand side of the operator is
        let resolved_field = match &self.field {
            Field::Kind => event.kind,
            Field::CreatedAt => event.created_at,
        };
        match &self.operator {
            Operator::LessThan => {
                // the less-than operator is only valid for single values.
                if self.values.len() == 1 {
                    if let Some(v) = self.values.first() {
                        return resolved_field < *v;
                    }
                }
            }
            Operator::GreaterThan => {
                // the greater-than operator is only valid for single values.
                if self.values.len() == 1 {
                    if let Some(v) = self.values.first() {
                        return resolved_field > *v;
                    }
                }
            }
            Operator::Equals => {
                // equals is interpreted as "must be equal to at least one provided value"
                return self.values.iter().any(|&x| resolved_field == x);
            }
            Operator::NotEquals => {
                // not-equals is interpreted as "must not be equal to any provided value"
                // this is the one case where an empty list of values could be allowed; even though it is a pointless restriction.
                return self.values.iter().all(|&x| resolved_field != x);
            }
        }
        false
    }
}

fn str_to_condition(cs: &str) -> Option<Condition> {
    // a condition is a string (alphanum+underscore), an operator (<>=!), and values (num+comma)
    lazy_static! {
        static ref RE: Regex = Regex::new("([[:word:]]+)([<>=!]+)([,[[:digit:]]]*)").unwrap();
    }
    // match against the regex
    let caps = RE.captures(cs)?;
    let field = caps.get(1)?.as_str().parse::<Field>().ok()?;
    let operator = caps.get(2)?.as_str().parse::<Operator>().ok()?;
    // values are just comma separated numbers, but all must be parsed
    let rawvals = caps.get(3)?.as_str();
    let values = rawvals
        .split_terminator(',')
        .map(|n| n.parse::<u64>().ok())
        .collect::<Option<Vec<_>>>()?;
    // convert field string into Field
    Some(Condition {
        field,
        operator,
        values,
    })
}

/// Parse a condition query from a string slice
impl FromStr for ConditionQuery {
    type Err = Error;
    fn from_str(value: &str) -> Result<Self, Self::Err> {
        // split the string with '&'
        let mut conditions = vec![];
        let condstrs = value.split_terminator('&');
        // parse each individual condition
        for c in condstrs {
            conditions.push(str_to_condition(c).ok_or(Error::DelegationParseError)?);
        }
        Ok(ConditionQuery { conditions })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // parse condition strings
    #[test]
    fn parse_empty() -> Result<()> {
        // given an empty condition query, produce an empty vector
        let empty_cq = ConditionQuery { conditions: vec![] };
        let parsed = "".parse::<ConditionQuery>()?;
        assert_eq!(parsed, empty_cq);
        Ok(())
    }

    // parse field 'kind'
    #[test]
    fn test_kind_field_parse() -> Result<()> {
        let field = "kind".parse::<Field>()?;
        assert_eq!(field, Field::Kind);
        Ok(())
    }
    // parse field 'created_at'
    #[test]
    fn test_created_at_field_parse() -> Result<()> {
        let field = "created_at".parse::<Field>()?;
        assert_eq!(field, Field::CreatedAt);
        Ok(())
    }
    // parse unknown field
    #[test]
    fn unknown_field_parse() {
        let field = "unk".parse::<Field>();
        assert!(field.is_err());
    }

    // parse a full conditional query with an empty array
    #[test]
    fn parse_kind_equals_empty() -> Result<()> {
        // given an empty condition query, produce an empty vector
        let kind_cq = ConditionQuery {
            conditions: vec![Condition {
                field: Field::Kind,
                operator: Operator::Equals,
                values: vec![],
            }],
        };
        let parsed = "kind=".parse::<ConditionQuery>()?;
        assert_eq!(parsed, kind_cq);
        Ok(())
    }
    // parse a full conditional query with a single value
    #[test]
    fn parse_kind_equals_singleval() -> Result<()> {
        // given an empty condition query, produce an empty vector
        let kind_cq = ConditionQuery {
            conditions: vec![Condition {
                field: Field::Kind,
                operator: Operator::Equals,
                values: vec![1],
            }],
        };
        let parsed = "kind=1".parse::<ConditionQuery>()?;
        assert_eq!(parsed, kind_cq);
        Ok(())
    }
    // parse a full conditional query with multiple values
    #[test]
    fn parse_kind_equals_multival() -> Result<()> {
        // given an empty condition query, produce an empty vector
        let kind_cq = ConditionQuery {
            conditions: vec![Condition {
                field: Field::Kind,
                operator: Operator::Equals,
                values: vec![1, 2, 4],
            }],
        };
        let parsed = "kind=1,2,4".parse::<ConditionQuery>()?;
        assert_eq!(parsed, kind_cq);
        Ok(())
    }
    // parse multiple conditions
    #[test]
    fn parse_multi_conditions() -> Result<()> {
        // given an empty condition query, produce an empty vector
        let cq = ConditionQuery {
            conditions: vec![
                Condition {
                    field: Field::Kind,
                    operator: Operator::GreaterThan,
                    values: vec![10000],
                },
                Condition {
                    field: Field::Kind,
                    operator: Operator::LessThan,
                    values: vec![20000],
                },
                Condition {
                    field: Field::Kind,
                    operator: Operator::NotEquals,
                    values: vec![10001],
                },
                Condition {
                    field: Field::CreatedAt,
                    operator: Operator::LessThan,
                    values: vec![1665867123],
                },
            ],
        };
        let parsed =
            "kind>10000&kind<20000&kind!10001&created_at<1665867123".parse::<ConditionQuery>()?;
        assert_eq!(parsed, cq);
        Ok(())
    }
    fn simple_event() -> Event {
        Event {
            id: "0".to_owned(),
            pubkey: "0".to_owned(),
            delegated_by: None,
            created_at: 0,
            kind: 0,
            tags: vec![],
            content: "".to_owned(),
            sig: "0".to_owned(),
            tagidx: None,
        }
    }
    // Check for condition logic on event w/ empty values
    #[test]
    fn condition_with_empty_values() {
        let mut c = Condition {
            field: Field::Kind,
            operator: Operator::GreaterThan,
            values: vec![],
        };
        let e = simple_event();
        assert!(!c.allows_event(&e));
        c.operator = Operator::LessThan;
        assert!(!c.allows_event(&e));
        c.operator = Operator::Equals;
        assert!(!c.allows_event(&e));
        // Not Equals applied to an empty list *is* allowed
        // (pointless, but logically valid).
        c.operator = Operator::NotEquals;
        assert!(c.allows_event(&e));
    }

    // Check for condition logic on event w/ single value
    #[test]
    fn condition_kind_gt_event_single() {
        let c = Condition {
            field: Field::Kind,
            operator: Operator::GreaterThan,
            values: vec![10],
        };
        let mut e = simple_event();
        // kind is not greater than 10, not allowed
        e.kind = 1;
        assert!(!c.allows_event(&e));
        // kind is greater than 10, allowed
        e.kind = 100;
        assert!(c.allows_event(&e));
        // kind is 10, not allowed
        e.kind = 10;
        assert!(!c.allows_event(&e));
    }
    // Check for condition logic on event w/ multi values
    #[test]
    fn condition_with_multi_values() {
        let mut c = Condition {
            field: Field::Kind,
            operator: Operator::Equals,
            values: vec![0, 10, 20],
        };
        let mut e = simple_event();
        // Allow if event kind is in list for Equals
        e.kind = 10;
        assert!(c.allows_event(&e));
        // Deny if event kind is not in list for Equals
        e.kind = 11;
        assert!(!c.allows_event(&e));
        // Deny if event kind is in list for NotEquals
        e.kind = 10;
        c.operator = Operator::NotEquals;
        assert!(!c.allows_event(&e));
        // Allow if event kind is not in list for NotEquals
        e.kind = 99;
        c.operator = Operator::NotEquals;
        assert!(c.allows_event(&e));
        // Always deny if GreaterThan/LessThan for a list
        c.operator = Operator::LessThan;
        assert!(!c.allows_event(&e));
        c.operator = Operator::GreaterThan;
        assert!(!c.allows_event(&e));
    }
}
```

src/error.rs (22 lines)

```diff
@@ -17,10 +17,16 @@ pub enum Error {
     ConnWriteError,
     #[error("EVENT parse failed")]
     EventParseFailed,
-    #[error("ClOSE message parse failed")]
+    #[error("CLOSE message parse failed")]
     CloseParseFailed,
-    #[error("Event validation failed")]
-    EventInvalid,
+    #[error("Event invalid signature")]
+    EventInvalidSignature,
+    #[error("Event invalid id")]
+    EventInvalidId,
+    #[error("Event malformed pubkey")]
+    EventMalformedPubkey,
+    #[error("Event could not canonicalize")]
+    EventCouldNotCanonicalize,
     #[error("Event too large")]
     EventMaxLengthError(usize),
     #[error("Subscription identifier max length exceeded")]
@@ -48,6 +54,10 @@ pub enum Error {
     JoinError,
     #[error("Hyper Client error")]
     HyperError(hyper::Error),
+    #[error("Hex encoding error")]
+    HexError(hex::FromHexError),
+    #[error("Delegation parse error")]
+    DelegationParseError,
     #[error("Unknown/Undocumented")]
     UnknownError,
 }
@@ -58,6 +68,12 @@ pub enum Error {
 // }
 //}
 
+impl From<hex::FromHexError> for Error {
+    fn from(h: hex::FromHexError) -> Self {
+        Error::HexError(h)
+    }
+}
+
 impl From<hyper::Error> for Error {
     fn from(h: hyper::Error) -> Self {
         Error::HyperError(h)
```

src/event.rs (229 lines)

```diff
@@ -1,12 +1,11 @@
 //! Event parsing and validation
-use crate::config;
+use crate::delegation::validate_delegation;
 use crate::error::Error::*;
 use crate::error::Result;
 use crate::nip05;
 use crate::utils::unix_time;
 use bitcoin_hashes::{sha256, Hash};
 use lazy_static::lazy_static;
-use log::*;
 use secp256k1::{schnorr, Secp256k1, VerifyOnly, XOnlyPublicKey};
 use serde::{Deserialize, Deserializer, Serialize};
 use serde_json::value::Value;
@@ -14,6 +13,7 @@ use serde_json::Number;
 use std::collections::HashMap;
 use std::collections::HashSet;
 use std::str::FromStr;
+use tracing::{debug, info};
 
 lazy_static! {
     /// Secp256k1 verification instance.
@@ -21,17 +21,25 @@ lazy_static! {
 }
 
 /// Event command in network format.
-#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
 pub struct EventCmd {
     cmd: String, // expecting static "EVENT"
     event: Event,
 }
 
+impl EventCmd {
+    pub fn event_id(&self) -> &str {
+        &self.event.id
+    }
+}
+
 /// Parsed nostr event.
-#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
 pub struct Event {
     pub id: String,
     pub(crate) pubkey: String,
+    #[serde(skip)]
+    pub(crate) delegated_by: Option<String>,
     pub(crate) created_at: u64,
     pub(crate) kind: u64,
     #[serde(deserialize_with = "tag_from_string")]
@@ -39,9 +47,9 @@ pub struct Event {
     pub(crate) tags: Vec<Vec<String>>,
     pub(crate) content: String,
     pub(crate) sig: String,
-    // Optimization for tag search, built on demand
+    // Optimization for tag search, built on demand.
     #[serde(skip)]
-    pub(crate) tagidx: Option<HashMap<String, HashSet<String>>>,
+    pub(crate) tagidx: Option<HashMap<char, HashSet<String>>>,
 }
 
 /// Simple tag type for array of array of strings.
@@ -53,7 +61,26 @@ where
     D: Deserializer<'de>,
 {
     let opt = Option::deserialize(deserializer)?;
-    Ok(opt.unwrap_or_else(Vec::new))
+    Ok(opt.unwrap_or_default())
+}
+
+/// Attempt to form a single-char tag name.
+pub fn single_char_tagname(tagname: &str) -> Option<char> {
+    // We return the tag character if and only if the tagname consists
+    // of a single char.
+    let mut tagnamechars = tagname.chars();
+    let firstchar = tagnamechars.next();
+    match firstchar {
+        Some(_) => {
+            // check second char
+            if tagnamechars.next().is_none() {
+                firstchar
+            } else {
+                None
+            }
+        }
+        None => None,
+    }
 }
```
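Only single-character tag names make it into the index, and, as seen in `write_event` earlier, into the searchable `tag` table. A few concrete cases:

```rust
#[test]
fn single_char_tagname_examples() {
    // standard searchable tags
    assert_eq!(single_char_tagname("e"), Some('e'));
    assert_eq!(single_char_tagname("p"), Some('p'));
    // multi-char names such as "delegation" are not indexed
    assert_eq!(single_char_tagname("delegation"), None);
    // the empty string has no first char
    assert_eq!(single_char_tagname(""), None);
}
```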
```diff
 
 /// Convert network event to parsed/validated event.
@@ -62,12 +89,13 @@ impl From<EventCmd> for Result<Event> {
         // ensure command is correct
         if ec.cmd != "EVENT" {
             Err(CommandUnknownError)
-        } else if ec.event.is_valid() {
-            let mut e = ec.event;
-            e.build_index();
-            Ok(e)
         } else {
-            Err(EventInvalid)
+            ec.event.validate().map(|_| {
+                let mut e = ec.event;
+                e.build_index();
+                e.update_delegation();
+                e
+            })
         }
     }
 }
@@ -92,6 +120,50 @@ impl Event {
         None
     }
 
+    // is this event delegated (properly)?
+    // does the signature match, and are conditions valid?
+    // if so, return an alternate author for the event
+    pub fn delegated_author(&self) -> Option<String> {
+        // is there a delegation tag?
+        let delegation_tag: Vec<String> = self
+            .tags
+            .iter()
+            .filter(|x| x.len() == 4)
+            .filter(|x| x.get(0).unwrap() == "delegation")
+            .take(1)
+            .next()?
+            .to_vec(); // get first tag
+
+        //let delegation_tag = self.tag_values_by_name("delegation");
+        // delegation tags should have exactly 3 elements after the name (pubkey, condition, sig)
+        // the event is signed by the delagatee
+        let delegatee = &self.pubkey;
+        // the delegation tag references the claimed delagator
+        let delegator: &str = delegation_tag.get(1)?;
+        let querystr: &str = delegation_tag.get(2)?;
+        let sig: &str = delegation_tag.get(3)?;
+
+        // attempt to get a condition query; this requires the delegation to have a valid signature.
+        if let Some(cond_query) = validate_delegation(delegator, delegatee, querystr, sig) {
+            // The signature was valid, now we ensure the delegation
+            // condition is valid for this event:
+            if cond_query.allows_event(self) {
+                // since this is allowed, we will provide the delegatee
+                Some(delegator.into())
+            } else {
+                debug!("an event failed to satisfy delegation conditions");
+                None
+            }
+        } else {
+            debug!("event had had invalid delegation signature");
+            None
+        }
+    }
+
+    /// Update delegation status
+    fn update_delegation(&mut self) {
+        self.delegated_by = self.delegated_author();
+    }
```
/// Build an event tag index
|
/// Build an event tag index
|
||||||
fn build_index(&mut self) {
|
fn build_index(&mut self) {
|
||||||
// if there are no tags; just leave the index as None
|
// if there are no tags; just leave the index as None
|
||||||
@@ -99,18 +171,21 @@ impl Event {
             return;
         }
         // otherwise, build an index
-        let mut idx: HashMap<String, HashSet<String>> = HashMap::new();
+        let mut idx: HashMap<char, HashSet<String>> = HashMap::new();
         // iterate over tags that have at least 2 elements
         for t in self.tags.iter().filter(|x| x.len() > 1) {
             let tagname = t.get(0).unwrap();
+            let tagnamechar_opt = single_char_tagname(tagname);
+            if tagnamechar_opt.is_none() {
+                continue;
+            }
+            let tagnamechar = tagnamechar_opt.unwrap();
             let tagval = t.get(1).unwrap();
             // ensure a vector exists for this tag
-            if !idx.contains_key(tagname) {
-                idx.insert(tagname.clone(), HashSet::new());
-            }
+            idx.entry(tagnamechar).or_insert_with(HashSet::new);
             // get the tag vec and insert entry
-            let tidx = idx.get_mut(tagname).expect("could not get tag vector");
-            tidx.insert(tagval.clone());
+            let idx_tag_vec = idx.get_mut(&tagnamechar).expect("could not get tag vector");
+            idx_tag_vec.insert(tagval.clone());
         }
         // save the tag structure
         self.tagidx = Some(idx);
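The same map-entry pattern the new build_index uses, condensed into a standalone sketch (std-only, runnable as-is):

    use std::collections::{HashMap, HashSet};

    fn main() {
        let mut idx: HashMap<char, HashSet<String>> = HashMap::new();
        // entry() creates the set on first use, exactly like the diff above
        idx.entry('e').or_insert_with(HashSet::new).insert("foo".to_owned());
        idx.entry('e').or_insert_with(HashSet::new).insert("bar".to_owned());
        assert_eq!(idx.get(&'e').map(|s| s.len()), Some(2));
    }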
@@ -124,24 +199,35 @@ impl Event {
         self.pubkey.chars().take(8).collect()
     }

-    /// Check if this event has a valid signature.
-    fn is_valid(&self) -> bool {
-        // TODO: return a Result with a reason for invalid events
-        // don't bother to validate an event with a timestamp in the distant future.
-        let config = config::SETTINGS.read().unwrap();
-        let max_future_sec = config.options.reject_future_seconds;
-        if let Some(allowable_future) = max_future_sec {
+    /// Retrieve tag initial values across all tags matching the name
+    pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
+        self.tags
+            .iter()
+            .filter(|x| x.len() > 1)
+            .filter(|x| x.get(0).unwrap() == tag_name)
+            .map(|x| x.get(1).unwrap().to_owned())
+            .collect()
+    }
+
+    pub fn is_valid_timestamp(&self, reject_future_seconds: Option<usize>) -> bool {
+        if let Some(allowable_future) = reject_future_seconds {
             let curr_time = unix_time();
             // calculate difference, plus how far future we allow
             if curr_time + (allowable_future as u64) < self.created_at {
                 let delta = self.created_at - curr_time;
                 debug!(
-                    "Event is too far in the future ({} seconds), rejecting",
+                    "event is too far in the future ({} seconds), rejecting",
                     delta
                 );
                 return false;
             }
         }
+        true
+    }
+
+    /// Check if this event has a valid signature.
+    fn validate(&self) -> Result<()> {
+        // TODO: return a Result with a reason for invalid events
         // validation is performed by:
         // * parsing JSON string into event fields
         // * create an array:
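To make the future-timestamp rule concrete, the core comparison from is_valid_timestamp in isolation (hypothetical numbers, runnable as-is):

    fn main() {
        let curr_time: u64 = 1_700_000_000;
        let created_at: u64 = 1_700_000_100; // event claims to be 100s in the future
        let allowable_future: u64 = 30; // reject_future_seconds = Some(30)
        // rejected, because now + allowance is still before created_at
        assert!(curr_time + allowable_future < created_at);
    }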
@@ -149,8 +235,8 @@ impl Event {
         // * serialize with no spaces/newlines
         let c_opt = self.to_canonical();
         if c_opt.is_none() {
-            debug!("event could not be canonicalized");
-            return false;
+            debug!("could not canonicalize");
+            return Err(EventCouldNotCanonicalize);
         }
         let c = c_opt.unwrap();
         // * compute the sha256sum.
@@ -159,22 +245,21 @@ impl Event {
         // * ensure the id matches the computed sha256sum.
         if self.id != hex_digest {
             debug!("event id does not match digest");
-            return false;
+            return Err(EventInvalidId);
         }
         // * validate the message digest (sig) using the pubkey & computed sha256 message hash.
-
         let sig = schnorr::Signature::from_str(&self.sig).unwrap();
         if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) {
             if let Ok(pubkey) = XOnlyPublicKey::from_str(&self.pubkey) {
-                let verify = SECP.verify_schnorr(&sig, &msg, &pubkey);
-                matches!(verify, Ok(()))
+                SECP.verify_schnorr(&sig, &msg, &pubkey)
+                    .map_err(|_| EventInvalidSignature)
             } else {
-                debug!("Client sent malformed pubkey");
-                false
+                debug!("client sent malformed pubkey");
+                Err(EventMalformedPubkey)
             }
         } else {
-            info!("Error converting digest to secp256k1 message");
-            false
+            info!("error converting digest to secp256k1 message");
+            Err(EventInvalidSignature)
         }
     }
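The shape of the bool-to-Result rewrite in miniature (a sketch with stand-in types, not the crate's actual error enum):

    fn main() {
        // stand-in for SECP.verify_schnorr(...)
        fn verify(ok: bool) -> Result<(), ()> {
            if ok { Ok(()) } else { Err(()) }
        }
        // before: matches!(verify, Ok(())) collapsed the reason to a bool;
        // after: map_err preserves a typed failure for the caller
        let res = verify(false).map_err(|_| "EventInvalidSignature");
        assert_eq!(res, Err("EventInvalidSignature"));
    }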
@@ -216,9 +301,10 @@ impl Event {
     }

     /// Determine if the given tag and value set intersect with tags in this event.
-    pub fn generic_tag_val_intersect(&self, tagname: &str, check: &HashSet<String>) -> bool {
+    pub fn generic_tag_val_intersect(&self, tagname: char, check: &HashSet<String>) -> bool {
         match &self.tagidx {
-            Some(idx) => match idx.get(tagname) {
+            // check if this is indexable tagname
+            Some(idx) => match idx.get(&tagname) {
                 Some(valset) => {
                     let common = valset.intersection(check);
                     common.count() > 0
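A usage sketch for the char-keyed intersection check (assumes crate access; the helper name is hypothetical):

    use std::collections::HashSet;
    use nostr_rs_relay::event::Event;

    /// True when the event's 'e' tags intersect the requested id set.
    fn references_any(event: &Event, ids: &[&str]) -> bool {
        let check: HashSet<String> = ids.iter().map(|s| (*s).to_owned()).collect();
        event.generic_tag_val_intersect('e', &check)
    }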
@@ -237,6 +323,7 @@ mod tests {
         Event {
             id: "0".to_owned(),
             pubkey: "0".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: vec![],
@@ -263,26 +350,24 @@ mod tests {
     }

     #[test]
-    fn empty_event_tag_match() -> Result<()> {
+    fn empty_event_tag_match() {
         let event = simple_event();
         assert!(!event
-            .generic_tag_val_intersect("e", &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
-        Ok(())
+            .generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
     }

     #[test]
-    fn single_event_tag_match() -> Result<()> {
+    fn single_event_tag_match() {
         let mut event = simple_event();
         event.tags = vec![vec!["e".to_owned(), "foo".to_owned()]];
         event.build_index();
         assert_eq!(
             event.generic_tag_val_intersect(
-                "e",
+                'e',
                 &HashSet::from(["foo".to_owned(), "bar".to_owned()])
             ),
             true
         );
-        Ok(())
     }

     #[test]
@@ -320,6 +405,7 @@ mod tests {
         let e = Event {
             id: "999".to_owned(),
             pubkey: "012345".to_owned(),
+            delegated_by: None,
             created_at: 501234,
             kind: 1,
             tags: vec![],
@@ -332,11 +418,66 @@ mod tests {
         assert_eq!(c, expected);
     }

+    #[test]
+    fn event_tag_select() {
+        let e = Event {
+            id: "999".to_owned(),
+            pubkey: "012345".to_owned(),
+            delegated_by: None,
+            created_at: 501234,
+            kind: 1,
+            tags: vec![
+                vec!["j".to_owned(), "abc".to_owned()],
+                vec!["e".to_owned(), "foo".to_owned()],
+                vec!["e".to_owned(), "bar".to_owned()],
+                vec!["e".to_owned(), "baz".to_owned()],
+                vec![
+                    "p".to_owned(),
+                    "aaaa".to_owned(),
+                    "ws://example.com".to_owned(),
+                ],
+            ],
+            content: "this is a test".to_owned(),
+            sig: "abcde".to_owned(),
+            tagidx: None,
+        };
+        let v = e.tag_values_by_name("e");
+        assert_eq!(v, vec!["foo", "bar", "baz"]);
+    }
+
+    #[test]
+    fn event_no_tag_select() {
+        let e = Event {
+            id: "999".to_owned(),
+            pubkey: "012345".to_owned(),
+            delegated_by: None,
+            created_at: 501234,
+            kind: 1,
+            tags: vec![
+                vec!["j".to_owned(), "abc".to_owned()],
+                vec!["e".to_owned(), "foo".to_owned()],
+                vec!["e".to_owned(), "baz".to_owned()],
+                vec![
+                    "p".to_owned(),
+                    "aaaa".to_owned(),
+                    "ws://example.com".to_owned(),
+                ],
+            ],
+            content: "this is a test".to_owned(),
+            sig: "abcde".to_owned(),
+            tagidx: None,
+        };
+        let v = e.tag_values_by_name("x");
+        // asking for tags that don't exist just returns zero-length vector
+        assert_eq!(v.len(), 0);
+    }
+
     #[test]
     fn event_canonical_with_tags() {
         let e = Event {
             id: "999".to_owned(),
             pubkey: "012345".to_owned(),
+            delegated_by: None,
             created_at: 501234,
             kind: 1,
             tags: vec![

src/hexrange.rs
@@ -3,7 +3,7 @@ use crate::utils::is_hex;
 use hex;

 /// Types of hexadecimal queries.
-#[derive(PartialEq, Debug, Clone)]
+#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)]
 pub enum HexSearch {
     // when no range is needed, exact 32-byte
     Exact(Vec<u8>),
@@ -60,11 +60,10 @@ pub fn hex_range(s: &str) -> Option<HexSearch> {
                 upper[byte_len] = b + 16; // bump up the first character in this byte
                 // increment done, stop iterating through the vec
                 break;
-            } else {
-                // if it is 'f', reset the byte to 0 and do a carry
-                // reset and carry
-                upper[byte_len] = 0;
             }
+            // if it is 'f', reset the byte to 0 and do a carry
+            // reset and carry
+            upper[byte_len] = 0;
             // done with odd logic, so don't repeat this
             odd = false;
         } else {
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
|
use crate::error::Result;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn hex_range_exact() -> Result<()> {
|
fn hex_range_exact() -> Result<()> {
|
||||||
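A usage sketch for hex_range (assumes crate access; HexSearch::Range is the assumed name of the ranged variant, since only Exact appears in this hunk):

    use nostr_rs_relay::hexrange::{hex_range, HexSearch};

    fn main() {
        // a full 64-char (32-byte) hex string needs no range
        assert!(matches!(hex_range(&"aa".repeat(32)), Some(HexSearch::Exact(_))));
        // a short prefix becomes a scan range over [lower, upper)
        assert!(matches!(hex_range("abc"), Some(HexSearch::Range(_, _))));
    }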

src/info.rs
@@ -35,7 +35,7 @@ impl From<config::Info> for RelayInfo {
             description: i.description,
             pubkey: i.pubkey,
             contact: i.contact,
-            supported_nips: Some(vec![1, 2, 11]),
+            supported_nips: Some(vec![1, 2, 9, 11, 12, 15, 16, 20, 22, 26]),
             software: Some("https://git.sr.ht/~gheartsfield/nostr-rs-relay".to_owned()),
             version: CARGO_PKG_VERSION.map(|x| x.to_owned()),
         }

src/lib.rs
@@ -2,11 +2,15 @@ pub mod close;
 pub mod config;
 pub mod conn;
 pub mod db;
+pub mod delegation;
 pub mod error;
 pub mod event;
 pub mod hexrange;
 pub mod info;
 pub mod nip05;
+pub mod notice;
 pub mod schema;
 pub mod subscription;
 pub mod utils;
+// Public API for creating relays programmatically
+pub mod server;

src/main.rs (586 lines changed)
@@ -1,569 +1,51 @@
 //! Server process
-use futures::SinkExt;
-use futures::StreamExt;
-use hyper::header::ACCEPT;
-use hyper::service::{make_service_fn, service_fn};
-use hyper::upgrade::Upgraded;
-use hyper::{
-    header, server::conn::AddrStream, upgrade, Body, Request, Response, Server, StatusCode,
-};
-use log::*;
-use nostr_rs_relay::close::Close;
-use nostr_rs_relay::close::CloseCmd;
 use nostr_rs_relay::config;
-use nostr_rs_relay::conn;
-use nostr_rs_relay::db;
-use nostr_rs_relay::db::SubmittedEvent;
-use nostr_rs_relay::error::{Error, Result};
-use nostr_rs_relay::event::Event;
-use nostr_rs_relay::event::EventCmd;
-use nostr_rs_relay::info::RelayInfo;
-use nostr_rs_relay::nip05;
-use nostr_rs_relay::subscription::Subscription;
-use serde::{Deserialize, Serialize};
-use std::collections::HashMap;
-use std::convert::Infallible;
+use nostr_rs_relay::server::start_server;
 use std::env;
-use std::net::SocketAddr;
-use std::path::Path;
-use std::time::Duration;
-use std::time::Instant;
-use tokio::runtime::Builder;
-use tokio::sync::broadcast::{self, Receiver, Sender};
-use tokio::sync::mpsc;
-use tokio::sync::oneshot;
-use tokio_tungstenite::WebSocketStream;
-use tungstenite::error::Error as WsError;
-use tungstenite::handshake;
-use tungstenite::protocol::Message;
-use tungstenite::protocol::WebSocketConfig;
+use std::sync::mpsc as syncmpsc;
+use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
+use std::thread;
+use tracing::info;
+use console_subscriber::ConsoleLayer;

 /// Return a requested DB name from command line arguments.
-fn db_from_args(args: Vec<String>) -> Option<String> {
+fn db_from_args(args: &[String]) -> Option<String> {
     if args.len() == 3 && args.get(1) == Some(&"--db".to_owned()) {
-        return args.get(2).map(|x| x.to_owned());
+        return args.get(2).map(std::clone::Clone::clone);
     }
     None
 }
 
-/// Handle arbitrary HTTP requests, including for WebSocket upgrades.
-async fn handle_web_request(
-    mut request: Request<Body>,
-    pool: db::SqlitePool,
-    remote_addr: SocketAddr,
-    broadcast: Sender<Event>,
-    event_tx: tokio::sync::mpsc::Sender<SubmittedEvent>,
-    shutdown: Receiver<()>,
-) -> Result<Response<Body>, Infallible> {
-    match (
-        request.uri().path(),
-        request.headers().contains_key(header::UPGRADE),
-    ) {
-        // Request for / as websocket
-        ("/", true) => {
-            debug!("websocket with upgrade request");
-            //assume request is a handshake, so create the handshake response
-            let response = match handshake::server::create_response_with_body(&request, || {
-                Body::empty()
-            }) {
-                Ok(response) => {
-                    //in case the handshake response creation succeeds,
-                    //spawn a task to handle the websocket connection
-                    tokio::spawn(async move {
-                        //using the hyper feature of upgrading a connection
-                        match upgrade::on(&mut request).await {
-                            //if successfully upgraded
-                            Ok(upgraded) => {
-                                // set WebSocket configuration options
-                                let mut config = WebSocketConfig::default();
-                                {
-                                    let settings = config::SETTINGS.read().unwrap();
-                                    config.max_message_size = settings.limits.max_ws_message_bytes;
-                                    config.max_frame_size = settings.limits.max_ws_frame_bytes;
-                                }
-                                //create a websocket stream from the upgraded object
-                                let ws_stream = WebSocketStream::from_raw_socket(
-                                    //pass the upgraded object
-                                    //as the base layer stream of the Websocket
-                                    upgraded,
-                                    tokio_tungstenite::tungstenite::protocol::Role::Server,
-                                    Some(config),
-                                )
-                                .await;
-
-                                tokio::spawn(nostr_server(
-                                    pool, ws_stream, broadcast, event_tx, shutdown,
-                                ));
-                            }
-                            Err(e) => println!(
-                                "error when trying to upgrade connection \
-                                 from address {} to websocket connection. \
-                                 Error is: {}",
-                                remote_addr, e
-                            ),
-                        }
-                    });
-                    //return the response to the handshake request
-                    response
-                }
-                Err(error) => {
-                    warn!("websocket response failed");
-                    let mut res =
-                        Response::new(Body::from(format!("Failed to create websocket: {}", error)));
-                    *res.status_mut() = StatusCode::BAD_REQUEST;
-                    return Ok(res);
-                }
-            };
-            Ok::<_, Infallible>(response)
-        }
-        // Request for Relay info
-        ("/", false) => {
-            // handle request at root with no upgrade header
-            // Check if this is a nostr server info request
-            let accept_header = &request.headers().get(ACCEPT);
-            // check if application/nostr+json is included
-            if let Some(media_types) = accept_header {
-                if let Ok(mt_str) = media_types.to_str() {
-                    if mt_str.contains("application/nostr+json") {
-                        let config = config::SETTINGS.read().unwrap();
-                        // build a relay info response
-                        debug!("Responding to server info request");
-                        let rinfo = RelayInfo::from(config.info.clone());
-                        let b = Body::from(serde_json::to_string_pretty(&rinfo).unwrap());
-                        return Ok(Response::builder()
-                            .status(200)
-                            .header("Content-Type", "application/nostr+json")
-                            .body(b)
-                            .unwrap());
-                    }
-                }
-            }
-            Ok(Response::new(Body::from(
-                "Please use a Nostr client to connect.",
-            )))
-        }
-        (_, _) => {
-            //handle any other url
-            Ok(Response::builder()
-                .status(StatusCode::NOT_FOUND)
-                .body(Body::from("Nothing here."))
-                .unwrap())
-        }
-    }
-}
-
-async fn shutdown_signal() {
-    // Wait for the CTRL+C signal
-    tokio::signal::ctrl_c()
-        .await
-        .expect("failed to install CTRL+C signal handler");
-}
 
 /// Start running a Nostr relay server.
-fn main() -> Result<(), Error> {
-    // setup logger
-    let _ = env_logger::try_init();
+fn main() {
+    // setup tracing
+    let _trace_sub = tracing_subscriber::fmt::try_init();
+    info!("Starting up from main");
     // get database directory from args
     let args: Vec<String> = env::args().collect();
-    let db_dir: Option<String> = db_from_args(args);
-    {
-        let mut settings = config::SETTINGS.write().unwrap();
-        // replace default settings with those read from config.toml
-        let mut c = config::Settings::new();
-        // update with database location
-        if let Some(db) = db_dir {
-            c.database.data_directory = db;
-        }
-        *settings = c;
-    }
-
-    let settings = config::SETTINGS.read().unwrap();
-    trace!("Config: {:?}", settings);
-    // do some config validation.
-    if !Path::new(&settings.database.data_directory).is_dir() {
-        error!("Database directory does not exist");
-        return Err(Error::DatabaseDirError);
-    }
-    let addr = format!(
-        "{}:{}",
-        settings.network.address.trim(),
-        settings.network.port
-    );
-    let socket_addr = addr.parse().expect("listening address not valid");
-    // address whitelisting settings
-    if let Some(addr_whitelist) = &settings.authorization.pubkey_whitelist {
-        info!(
-            "Event publishing restricted to {} pubkey(s)",
-            addr_whitelist.len()
-        );
-    }
-    // check if NIP-05 enforced user verification is on
-    if settings.verified_users.is_active() {
-        info!(
-            "NIP-05 user verification mode:{:?}",
-            settings.verified_users.mode
-        );
-        if let Some(d) = settings.verified_users.verify_update_duration() {
-            info!("NIP-05 check user verification every: {:?}", d);
-        }
-        if let Some(d) = settings.verified_users.verify_expiration_duration() {
-            info!("NIP-05 user verification expires after: {:?}", d);
-        }
-        if let Some(wl) = &settings.verified_users.domain_whitelist {
-            info!("NIP-05 domain whitelist: {:?}", wl);
-        }
-        if let Some(bl) = &settings.verified_users.domain_blacklist {
-            info!("NIP-05 domain blacklist: {:?}", bl);
-        }
-    }
-    // configure tokio runtime
-    let rt = Builder::new_multi_thread()
-        .enable_all()
-        .thread_name("tokio-ws")
-        .build()
-        .unwrap();
-    // start tokio
-    rt.block_on(async {
-        let settings = config::SETTINGS.read().unwrap();
-        info!("listening on: {}", socket_addr);
-        // all client-submitted valid events are broadcast to every
-        // other client on this channel. This should be large enough
-        // to accommodate slower readers (messages are dropped if
-        // clients can not keep up).
-        let (bcast_tx, _) = broadcast::channel::<Event>(settings.limits.broadcast_buffer);
-        // validated events that need to be persisted are sent to the
-        // database via this channel.
-        let (event_tx, event_rx) =
-            mpsc::channel::<SubmittedEvent>(settings.limits.event_persist_buffer);
-        // establish a channel for letting all threads know about a
-        // requested server shutdown.
-        let (invoke_shutdown, shutdown_listen) = broadcast::channel::<()>(1);
-        // create a channel for sending any new metadata event. These
-        // will get processed relatively slowly (a potentially
-        // multi-second blocking HTTP call) on a single thread, so we
-        // buffer requests on the channel. No harm in dropping events
-        // here, since we are protecting against DoS. This can make
-        // it difficult to set up initial metadata in bulk, since
-        // overwhelming this will drop events and won't register
-        // metadata events.
-        let (metadata_tx, metadata_rx) = broadcast::channel::<Event>(4096);
-        // start the database writer thread. Give it a channel for
-        // writing events, and for publishing events that have been
-        // written (to all connected clients).
-        db::db_writer(
-            event_rx,
-            bcast_tx.clone(),
-            metadata_tx.clone(),
-            shutdown_listen,
-        )
-        .await;
-        info!("db writer created");
-
-        // create a nip-05 verifier thread
-        let verifier_opt = nip05::Verifier::new(metadata_rx, bcast_tx.clone());
-        if let Ok(mut v) = verifier_opt {
-            if settings.verified_users.is_active() {
-                tokio::task::spawn(async move {
-                    info!("starting up NIP-05 verifier...");
-                    v.run().await;
-                });
-            }
-        }
-        // listen for ctrl-c interrupts
-        let ctrl_c_shutdown = invoke_shutdown.clone();
-        tokio::spawn(async move {
-            tokio::signal::ctrl_c().await.unwrap();
-            info!("shutting down due to SIGINT");
-            ctrl_c_shutdown.send(()).ok();
-        });
-        // build a connection pool for sqlite connections
-        let pool = db::build_pool(
-            "client query",
-            rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY
-                | rusqlite::OpenFlags::SQLITE_OPEN_SHARED_CACHE,
-            settings.database.min_conn,
-            settings.database.max_conn,
-            true,
-        );
-        // A `Service` is needed for every connection, so this
-        // creates one from our `handle_request` function.
-        let make_svc = make_service_fn(|conn: &AddrStream| {
-            let svc_pool = pool.clone();
-            let remote_addr = conn.remote_addr();
-            let bcast = bcast_tx.clone();
-            let event = event_tx.clone();
-            let stop = invoke_shutdown.clone();
-            async move {
-                // service_fn converts our function into a `Service`
-                Ok::<_, Infallible>(service_fn(move |request: Request<Body>| {
-                    handle_web_request(
-                        request,
-                        svc_pool.clone(),
-                        remote_addr,
-                        bcast.clone(),
-                        event.clone(),
-                        stop.subscribe(),
-                    )
-                }))
-            }
-        });
-        let server = Server::bind(&socket_addr)
-            .serve(make_svc)
-            .with_graceful_shutdown(shutdown_signal());
-        // run hyper
-        if let Err(e) = server.await {
-            eprintln!("server error: {}", e);
-        }
-        // our code
-    });
-    Ok(())
-}
+    let db_dir: Option<String> = db_from_args(&args);
+    // configure settings from config.toml
+    // replace default settings with those read from config.toml
+    let mut settings = config::Settings::new();
+
+    if settings.diagnostics.tracing {
+        // enable tracing with tokio-console
+        ConsoleLayer::builder().with_default_env().init();
+    }
+    // update with database location
+    if let Some(db) = db_dir {
+        settings.database.data_directory = db;
+    }
+
+    let (_, ctrl_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
+    // run this in a new thread
+    let handle = thread::spawn(|| {
+        // we should have a 'control plane' channel to monitor and bump the server.
+        // this will let us do stuff like clear the database, shutdown, etc.
+        let _svr = start_server(settings, ctrl_rx);
+    });
+    // block on nostr thread to finish.
+    handle.join().unwrap();
+}
-
-/// Nostr protocol messages from a client
-#[derive(Deserialize, Serialize, Clone, PartialEq, Debug)]
-#[serde(untagged)]
-pub enum NostrMessage {
-    /// An `EVENT` message
-    EventMsg(EventCmd),
-    /// A `REQ` message
-    SubMsg(Subscription),
-    /// A `CLOSE` message
-    CloseMsg(CloseCmd),
-}
-
-/// Convert Message to NostrMessage
-fn convert_to_msg(msg: String) -> Result<NostrMessage> {
-    let config = config::SETTINGS.read().unwrap();
-    let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
-    match parsed_res {
-        Ok(m) => {
-            if let NostrMessage::EventMsg(_) = m {
-                if let Some(max_size) = config.limits.max_event_bytes {
-                    // check length, ensure that some max size is set.
-                    if msg.len() > max_size && max_size > 0 {
-                        return Err(Error::EventMaxLengthError(msg.len()));
-                    }
-                }
-            }
-            Ok(m)
-        }
-        Err(e) => {
-            debug!("proto parse error: {:?}", e);
-            debug!("parse error on message: {}", msg.trim());
-            Err(Error::ProtoParseError)
-        }
-    }
-}
-
-/// Handle new client connections. This runs through an event loop
-/// for all client communication.
-async fn nostr_server(
-    pool: db::SqlitePool,
-    mut ws_stream: WebSocketStream<Upgraded>,
-    broadcast: Sender<Event>,
-    event_tx: mpsc::Sender<SubmittedEvent>,
-    mut shutdown: Receiver<()>,
-) {
-    // get a broadcast channel for clients to communicate on
-    let mut bcast_rx = broadcast.subscribe();
-    // Track internal client state
-    let mut conn = conn::ClientConn::new();
-    let cid = conn.get_client_prefix();
-    // Create a channel for receiving query results from the database.
-    // we will send out the tx handle to any query we generate.
-    let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(256);
-    // Create channel for receiving NOTICEs
-    let (notice_tx, mut notice_rx) = mpsc::channel::<String>(32);
-
-    // maintain a hashmap of a oneshot channel for active subscriptions.
-    // when these subscriptions are cancelled, make a message
-    // available to the executing query so it knows to stop.
-
-    // last time this client sent data
-    let mut last_message_time = Instant::now();
-
-    // ping interval (every 5 minutes)
-    let default_ping_dur = Duration::from_secs(300);
-
-    // disconnect after 20 minutes without a ping response or event.
-    let max_quiet_time = Duration::from_secs(60 * 20);
-
-    let start = tokio::time::Instant::now() + default_ping_dur;
-    let mut ping_interval = tokio::time::interval_at(start, default_ping_dur);
-
-    let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
-    // for stats, keep track of how many events the client published,
-    // and how many it received from queries.
-    let mut client_published_event_count: usize = 0;
-    let mut client_received_event_count: usize = 0;
-    info!("new connection for client: {:?}", cid);
-    loop {
-        tokio::select! {
-            _ = shutdown.recv() => {
-                // server shutting down, exit loop
-                break;
-            },
-            _ = ping_interval.tick() => {
-                // check how long since we talked to client
-                // if it has been too long, disconnect
-                if last_message_time.elapsed() > max_quiet_time {
-                    debug!("ending connection due to lack of client ping response");
-                    break;
-                }
-                // Send a ping
-                ws_stream.send(Message::Ping(Vec::new())).await.ok();
-            },
-            Some(notice_msg) = notice_rx.recv() => {
-                let n = notice_msg.to_string().replace("\"", "");
-                ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", n))).await.ok();
-            },
-            Some(query_result) = query_rx.recv() => {
-                // database informed us of a query result we asked for
-                client_received_event_count += 1;
-                // send a result
-                let subesc = query_result.sub_id.replace("\"", "");
-                let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
-                ws_stream.send(Message::Text(send_str)).await.ok();
-            },
-            // TODO: consider logging the LaggedRecv error
-            Ok(global_event) = bcast_rx.recv() => {
-                // an event has been broadcast to all clients
-                // first check if there is a subscription for this event.
-                let matching_subs = conn.get_matching_subscriptions(&global_event);
-                for s in matching_subs {
-                    // TODO: serialize at broadcast time, instead of
-                    // once for each consumer.
-                    if let Ok(event_str) = serde_json::to_string(&global_event) {
-                        debug!("sub match: client: {:?}, sub: {:?}, event: {:?}",
-                               cid, s,
-                               global_event.get_event_id_prefix());
-                        // create an event response and send it
-                        let subesc = s.replace("\"", "");
-                        ws_stream.send(Message::Text(format!("[\"EVENT\",\"{}\",{}]", subesc, event_str))).await.ok();
-                        //nostr_stream.send(res).await.ok();
-                    } else {
-                        warn!("could not serialize event {:?}", global_event.get_event_id_prefix());
-                    }
-                }
-            },
-            ws_next = ws_stream.next() => {
-                // update most recent message time for client
-                last_message_time = Instant::now();
-                // Consume text messages from the client, parse into Nostr messages.
-                let nostr_msg = match ws_next {
-                    Some(Ok(Message::Text(m))) => {
-                        convert_to_msg(m)
-                    },
-                    Some(Ok(Message::Binary(_))) => {
-                        ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "binary messages are not accepted"))).await.ok();
-                        continue;
-                    },
-                    Some(Ok(Message::Ping(_))) | Some(Ok(Message::Pong(_))) => {
-                        // get a ping/pong, ignore
-                        continue;
-                    },
-                    None | Some(Ok(Message::Close(_))) | Some(Err(WsError::AlreadyClosed)) | Some(Err(WsError::ConnectionClosed)) => {
-                        debug!("normal websocket close from client: {:?}", cid);
-                        break;
-                    },
-                    x => {
-                        info!("message was: {:?} (ignoring)", x);
-                        continue;
-                    }
-                };
-
-                // convert ws_next into proto_next
-                match nostr_msg {
-                    Ok(NostrMessage::EventMsg(ec)) => {
-                        // An EventCmd needs to be validated to be converted into an Event
-                        // handle each type of message
-                        let parsed : Result<Event> = Result::<Event>::from(ec);
-                        match parsed {
-                            Ok(e) => {
-                                let id_prefix: String = e.id.chars().take(8).collect();
-                                debug!("successfully parsed/validated event: {:?} from client: {:?}", id_prefix, cid);
-                                // Write this to the database.
-                                let submit_event = SubmittedEvent { event: e.clone(), notice_tx: notice_tx.clone() };
-                                event_tx.send(submit_event).await.ok();
-                                client_published_event_count += 1;
-                            },
-                            Err(_) => {
-                                info!("client {:?} sent an invalid event", cid);
-                                ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "event was invalid"))).await.ok();
-                            }
-                        }
-                    },
-                    Ok(NostrMessage::SubMsg(s)) => {
-                        debug!("client {} requesting a subscription", cid);
-                        // subscription handling consists of:
-                        // * registering the subscription so future events can be matched
-                        // * making a channel to cancel the request later
-                        // * sending a request for a SQL query
-                        let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
-                        match conn.subscribe(s.clone()) {
-                            Ok(()) => {
-                                // when we insert, if there was a previous query running with the same name, cancel it.
-                                if let Some(previous_query) = running_queries.insert(s.id.to_owned(), abandon_query_tx) {
-                                    previous_query.send(()).ok();
-                                }
-                                // start a database query
-                                // show pool stats
-                                debug!("DB pool stats: {:?}", pool.state());
-                                db::db_query(s, pool.get().expect("could not get connection"), query_tx.clone(), abandon_query_rx).await;
-                            },
-                            Err(e) => {
-                                info!("Subscription error: {}", e);
-                                let s = e.to_string().replace("\"", "");
-                                ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", s))).await.ok();
-                            }
-                        }
-                    },
-                    Ok(NostrMessage::CloseMsg(cc)) => {
-                        // closing a request simply removes the subscription.
-                        let parsed : Result<Close> = Result::<Close>::from(cc);
-                        match parsed {
-                            Ok(c) => {
-                                // check if a query is currently
-                                // running, and remove it if so.
-                                let stop_tx = running_queries.remove(&c.id);
-                                if let Some(tx) = stop_tx {
-                                    tx.send(()).ok();
-                                }
-                                // stop checking new events against
-                                // the subscription
-                                conn.unsubscribe(c);
-                            },
-                            Err(_) => {
-                                info!("invalid command ignored");
-                                ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "could not parse command"))).await.ok();
-                            }
-                        }
-                    },
-                    Err(Error::ConnError) => {
-                        debug!("got connection close/error, disconnecting client: {:?}", cid);
-                        break;
-                    }
-                    Err(Error::EventMaxLengthError(s)) => {
-                        info!("client {:?} sent event larger ({} bytes) than max size", cid, s);
-                        ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "event exceeded max size"))).await.ok();
-                    },
-                    Err(Error::ProtoParseError) => {
-                        info!("client {:?} sent event that could not be parsed", cid);
-                        ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "could not parse command"))).await.ok();
-                    },
-                    Err(e) => {
-                        info!("got non-fatal error from client: {:?}, error: {:?}", cid, e);
-                    },
-                }
-            },
-        }
-    }
-    // connection cleanup - ensure any still running queries are terminated.
-    for (_, stop_tx) in running_queries.into_iter() {
-        stop_tx.send(()).ok();
-    }
-    info!(
-        "stopping connection for client: {:?} (client sent {} event(s), received {})",
-        cid, client_published_event_count, client_received_event_count
-    );
-}
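With main.rs reduced to a thin wrapper, embedding the relay elsewhere follows the same recipe; a minimal sketch assuming the start_server(Settings, Receiver<()>) signature shown above and that it blocks on its own internal runtime:

    use nostr_rs_relay::config;
    use nostr_rs_relay::server::start_server;
    use std::sync::mpsc;
    use std::thread;

    fn main() {
        let settings = config::Settings::new();
        // keep the sender to signal a shutdown later, if desired
        let (_ctrl_tx, ctrl_rx) = mpsc::channel::<()>();
        let handle = thread::spawn(move || {
            let _ = start_server(settings, ctrl_rx);
        });
        handle.join().unwrap();
    }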

src/nip05.rs (84 lines changed)
@@ -4,7 +4,7 @@
 //! address with their public key, in metadata events. This module
 //! consumes a stream of metadata events, and keeps a database table
 //! updated with the current NIP-05 verification status.
-use crate::config::SETTINGS;
+use crate::config::VerifiedUsers;
 use crate::db;
 use crate::error::{Error, Result};
 use crate::event::Event;
@@ -13,13 +13,13 @@ use hyper::body::HttpBody;
 use hyper::client::connect::HttpConnector;
 use hyper::Client;
 use hyper_tls::HttpsConnector;
-use log::*;
 use rand::Rng;
 use rusqlite::params;
 use std::time::Duration;
 use std::time::Instant;
 use std::time::SystemTime;
 use tokio::time::Interval;
+use tracing::{debug, info, warn};

 /// NIP-05 verifier state
 pub struct Verifier {
@@ -31,6 +31,8 @@ pub struct Verifier {
     read_pool: db::SqlitePool,
     /// SQLite write query pool
     write_pool: db::SqlitePool,
+    /// Settings
+    settings: crate::config::Settings,
     /// HTTP client
     client: hyper::Client<HttpsConnector<HttpConnector>, hyper::Body>,
     /// After all accounts are updated, wait this long before checking again.
@@ -42,7 +44,7 @@ pub struct Verifier {
 }

 /// A NIP-05 identifier is a local part and domain.
-#[derive(PartialEq, Debug, Clone)]
+#[derive(PartialEq, Eq, Debug, Clone)]
 pub struct Nip05Name {
     local: String,
     domain: String,
@@ -138,11 +140,13 @@ impl Verifier {
     pub fn new(
         metadata_rx: tokio::sync::broadcast::Receiver<Event>,
         event_tx: tokio::sync::broadcast::Sender<Event>,
+        settings: crate::config::Settings,
     ) -> Result<Self> {
         info!("creating NIP-05 verifier");
         // build a database connection for reading and writing.
         let write_pool = db::build_pool(
             "nip05 writer",
+            &settings,
             rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE,
             1, // min conns
             4, // max conns
@@ -150,6 +154,7 @@ impl Verifier {
         );
         let read_pool = db::build_pool(
             "nip05 reader",
+            &settings,
             rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY,
             1, // min conns
             8, // max conns
@@ -174,6 +179,7 @@ impl Verifier {
             event_tx,
             read_pool,
             write_pool,
+            settings,
             client,
             wait_after_finish,
             http_wait_duration,
@@ -214,7 +220,11 @@ impl Verifier {
         pubkey: &str,
     ) -> Result<UserWebVerificationStatus> {
         // determine if this domain should be checked
-        if !is_domain_allowed(&nip.domain) {
+        if !is_domain_allowed(
+            &nip.domain,
+            &self.settings.verified_users.domain_whitelist,
+            &self.settings.verified_users.domain_blacklist,
+        ) {
             return Ok(UserWebVerificationStatus::DomainNotAllowed);
         }
         let url = nip
@@ -239,9 +249,9 @@ impl Verifier {
         // HTTP request with timeout
         match tokio::time::timeout(Duration::from_secs(5), response_fut).await {
             Ok(response_res) => {
-                let response = response_res?;
                 // limit size of verification document to 1MB.
                 const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024 * 1024;
+                let response = response_res?;
                 // determine content length from response
                 let response_content_length = match response.body().size_hint().upper() {
                     Some(v) => v,
@@ -257,12 +267,11 @@ impl Verifier {
                     let body_matches = body_contains_user(&nip.local, pubkey, body_bytes)?;
                     if body_matches {
                         return Ok(UserWebVerificationStatus::Verified);
-                    } else {
-                        // successful response, parsed as a nip-05
-                        // document, but this name/pubkey was not
-                        // present.
-                        return Ok(UserWebVerificationStatus::Unverified);
                     }
+                    // successful response, parsed as a nip-05
+                    // document, but this name/pubkey was not
+                    // present.
+                    return Ok(UserWebVerificationStatus::Unverified);
                 }
             } else {
                 info!(
@@ -347,15 +356,11 @@ impl Verifier {

     /// Reverify the oldest user verification record.
     async fn do_reverify(&mut self) -> Result<()> {
-        let reverify_setting;
-        let max_failures;
-        {
-            // this block prevents a read handle to settings being
-            // captured by the async DB call (guard is not Send)
-            let settings = SETTINGS.read().unwrap();
-            reverify_setting = settings.verified_users.verify_update_frequency_duration;
-            max_failures = settings.verified_users.max_consecutive_failures;
-        }
+        let reverify_setting = self
+            .settings
+            .verified_users
+            .verify_update_frequency_duration;
+        let max_failures = self.settings.verified_users.max_consecutive_failures;
         // get from settings, but default to 6hrs between re-checking an account
         let reverify_dur = reverify_setting.unwrap_or_else(|| Duration::from_secs(60 * 60 * 6));
         // find all verification records that have success or failure OLDER than the reverify_dur.
@@ -506,17 +511,13 @@ impl Verifier {
         let start = Instant::now();
         // we should only do this if we are enabled. if we are
         // disabled/passive, the event has already been persisted.
-        let should_write_event;
-        {
-            let settings = SETTINGS.read().unwrap();
-            should_write_event = settings.verified_users.is_enabled()
-        }
+        let should_write_event = self.settings.verified_users.is_enabled();
         if should_write_event {
             match db::write_event(&mut self.write_pool.get()?, event) {
                 Ok(updated) => {
                     if updated != 0 {
                         info!(
-                            "persisted event: {:?} in {:?}",
+                            "persisted event (new verified pubkey): {:?} in {:?}",
                             event.get_event_id_prefix(),
                             start.elapsed()
                         );
@@ -538,7 +539,7 @@ impl Verifier {
 }

 /// Result of checking user's verification status against DNS/HTTP.
-#[derive(PartialEq, Debug, Clone)]
+#[derive(PartialEq, Eq, Debug, Clone)]
 pub enum UserWebVerificationStatus {
     Verified,         // user is verified, as of now.
     DomainNotAllowed, // domain blacklist or whitelist denied us from attempting a verification
@@ -547,7 +548,7 @@ pub enum UserWebVerificationStatus {
 }

 /// A NIP-05 verification record.
-#[derive(PartialEq, Debug, Clone)]
+#[derive(PartialEq, Eq, Debug, Clone)]
 // Basic information for a verification event. Gives us all we need to assert a NIP-05 address is good.
 pub struct VerificationRecord {
     pub rowid: u64, // database row for this verification event
@@ -562,15 +563,18 @@ pub struct VerificationRecord {

 /// Check with settings to determine if a given domain is allowed to
 /// publish.
-pub fn is_domain_allowed(domain: &str) -> bool {
-    let settings = SETTINGS.read().unwrap();
+pub fn is_domain_allowed(
+    domain: &str,
+    whitelist: &Option<Vec<String>>,
+    blacklist: &Option<Vec<String>>,
+) -> bool {
     // if there is a whitelist, domain must be present in it.
-    if let Some(wl) = &settings.verified_users.domain_whitelist {
+    if let Some(wl) = whitelist {
         // workaround for Vec contains not accepting &str
         return wl.iter().any(|x| x == domain);
     }
     // otherwise, check that user is not in the blacklist
-    if let Some(bl) = &settings.verified_users.domain_blacklist {
+    if let Some(bl) = blacklist {
         return !bl.iter().any(|x| x == domain);
     }
     true
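The refactored function is now trivially testable without global state; a runnable sketch of its precedence (whitelist, when present, wins; otherwise the blacklist is consulted):

    use nostr_rs_relay::nip05::is_domain_allowed;

    fn main() {
        let wl = Some(vec!["example.com".to_owned()]);
        let bl = Some(vec!["spam.example".to_owned()]);
        assert!(is_domain_allowed("example.com", &wl, &None));
        assert!(!is_domain_allowed("other.com", &wl, &None)); // not whitelisted
        assert!(!is_domain_allowed("spam.example", &None, &bl)); // blacklisted
        assert!(is_domain_allowed("other.com", &None, &bl));
    }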
@@ -579,17 +583,21 @@ pub fn is_domain_allowed(domain: &str) -> bool {
 impl VerificationRecord {
     /// Check if the record is recent enough to be considered valid,
     /// and the domain is allowed.
-    pub fn is_valid(&self) -> bool {
-        let settings = SETTINGS.read().unwrap();
+    pub fn is_valid(&self, verified_users_settings: &VerifiedUsers) -> bool {
+        //let settings = SETTINGS.read().unwrap();
         // how long a verification record is good for
-        let nip05_expiration = &settings.verified_users.verify_expiration_duration;
+        let nip05_expiration = &verified_users_settings.verify_expiration_duration;
         if let Some(e) = nip05_expiration {
             if !self.is_current(e) {
                 return false;
             }
         }
         // check domains
-        is_domain_allowed(&self.name.domain)
+        is_domain_allowed(
+            &self.name.domain,
+            &verified_users_settings.domain_whitelist,
+            &verified_users_settings.domain_blacklist,
+        )
     }

     /// Check if this record has been validated since the given
@@ -705,9 +713,7 @@ pub async fn get_oldest_user_verification(
     conn: db::PooledConnection,
     earliest: u64,
 ) -> Result<VerificationRecord> {
-    let res =
-        tokio::task::spawn_blocking(move || query_oldest_user_verification(conn, earliest)).await?;
-    res
+    tokio::task::spawn_blocking(move || query_oldest_user_verification(conn, earliest)).await?
 }

 pub fn query_oldest_user_verification(
@@ -715,7 +721,7 @@ pub fn query_oldest_user_verification(
     earliest: u64,
 ) -> Result<VerificationRecord> {
     let tx = conn.transaction()?;
-    let query = "SELECT v.id, v.name, e.event_hash, e.author, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v LEFT JOIN event e ON e.id=v.metadata_event WHERE (v.verified_at < ? OR v.verified_at IS NULL) AND (v.failed_at < ? OR v.failed_at IS NULL) ORDER BY v.verified_at ASC, v.failed_at ASC LIMIT 1;";
+    let query = "SELECT v.id, v.name, e.event_hash, e.author, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v INNER JOIN event e ON e.id=v.metadata_event WHERE (v.verified_at < ? OR v.verified_at IS NULL) AND (v.failed_at < ? OR v.failed_at IS NULL) ORDER BY v.verified_at ASC, v.failed_at ASC LIMIT 1;";
     let mut stmt = tx.prepare_cached(query)?;
    let fields = stmt.query_row(params![earliest, earliest], |r| {
        let rowid: u64 = r.get(0)?;
86 src/notice.rs Normal file
@@ -0,0 +1,86 @@
pub enum EventResultStatus {
    Saved,
    Duplicate,
    Invalid,
    Blocked,
    RateLimited,
    Error,
}

pub struct EventResult {
    pub id: String,
    pub msg: String,
    pub status: EventResultStatus,
}

pub enum Notice {
    Message(String),
    EventResult(EventResult),
}

impl EventResultStatus {
    pub fn to_bool(&self) -> bool {
        match self {
            Self::Saved => true,
            Self::Duplicate => true,
            Self::Invalid => false,
            Self::Blocked => false,
            Self::RateLimited => false,
            Self::Error => false,
        }
    }

    pub fn prefix(&self) -> &'static str {
        match self {
            Self::Saved => "saved",
            Self::Duplicate => "duplicate",
            Self::Invalid => "invalid",
            Self::Blocked => "blocked",
            Self::RateLimited => "rate-limited",
            Self::Error => "error",
        }
    }
}

impl Notice {
    //pub fn err(err: error::Error, id: String) -> Notice {
    //    Notice::err_msg(format!("{}", err), id)
    //}

    pub fn message(msg: String) -> Notice {
        Notice::Message(msg)
    }

    fn prefixed(id: String, msg: &str, status: EventResultStatus) -> Notice {
        let msg = format!("{}: {}", status.prefix(), msg);
        Notice::EventResult(EventResult { id, msg, status })
    }

    pub fn invalid(id: String, msg: &str) -> Notice {
        Notice::prefixed(id, msg, EventResultStatus::Invalid)
    }

    pub fn blocked(id: String, msg: &str) -> Notice {
        Notice::prefixed(id, msg, EventResultStatus::Blocked)
    }

    pub fn rate_limited(id: String, msg: &str) -> Notice {
        Notice::prefixed(id, msg, EventResultStatus::RateLimited)
    }

    pub fn duplicate(id: String) -> Notice {
        Notice::prefixed(id, "", EventResultStatus::Duplicate)
    }

    pub fn error(id: String, msg: &str) -> Notice {
        Notice::prefixed(id, msg, EventResultStatus::Error)
    }

    pub fn saved(id: String) -> Notice {
        Notice::EventResult(EventResult {
            id,
            msg: "".into(),
            status: EventResultStatus::Saved,
        })
    }
}
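Taken together, these constructors give callers one uniform way to build NIP-20 style command results. A small usage sketch (the server code that actually serializes and sends these frames appears in src/server.rs further down):

    // Usage sketch for the Notice constructors above.
    fn notice_for_result(event_id: String, saved: bool) -> Notice {
        if saved {
            // an EventResult with status Saved and an empty message
            Notice::saved(event_id)
        } else {
            // yields "invalid: event signature failed" as the message
            Notice::invalid(event_id, "event signature failed")
        }
    }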
436 src/schema.rs
@@ -1,30 +1,37 @@
 //! Database schema and migrations
 use crate::db::PooledConnection;
 use crate::error::Result;
-use crate::utils::is_hex;
-use log::*;
+use crate::event::{single_char_tagname, Event};
+use crate::utils::is_lower_hex;
+use const_format::formatcp;
 use rusqlite::limits::Limit;
 use rusqlite::params;
 use rusqlite::Connection;
-// TODO: drop the pubkey_ref and event_ref tables
+use std::cmp::Ordering;
+use std::time::Instant;
+use tracing::{debug, error, info};

 /// Startup DB Pragmas
 pub const STARTUP_SQL: &str = r##"
 PRAGMA main.synchronous=NORMAL;
 PRAGMA foreign_keys = ON;
-pragma mmap_size = 536870912; -- 512MB of mmap
+PRAGMA journal_size_limit=32768;
+pragma mmap_size = 17179869184; -- cap mmap at 16GB
 "##;

+/// Latest database version
+pub const DB_VERSION: usize = 11;
+
 /// Schema definition
-const INIT_SQL: &str = r##"
+const INIT_SQL: &str = formatcp!(
+    r##"
 -- Database settings
 PRAGMA encoding = "UTF-8";
 PRAGMA journal_mode=WAL;
 PRAGMA main.synchronous=NORMAL;
 PRAGMA foreign_keys = ON;
 PRAGMA application_id = 1654008667;
-PRAGMA user_version = 5;
+PRAGMA user_version = {};

 -- Event Table
 CREATE TABLE IF NOT EXISTS event (
@@ -33,6 +40,7 @@ event_hash BLOB NOT NULL, -- 4-byte hash
 first_seen INTEGER NOT NULL, -- when the event was first seen (not authored!) (seconds since 1970)
 created_at INTEGER NOT NULL, -- when the event was authored
 author BLOB NOT NULL, -- author pubkey
+delegated_by BLOB, -- delegator pubkey (NIP-26)
 kind INTEGER NOT NULL, -- event kind
 hidden INTEGER, -- relevant for queries
 content TEXT NOT NULL -- serialized json of event object
@@ -40,9 +48,10 @@ content TEXT NOT NULL -- serialized json of event object

 -- Event Indexes
 CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash);
-CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
 CREATE INDEX IF NOT EXISTS author_index ON event(author);
-CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
+CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
+CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
+CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);

 -- Tag Table
 -- Tag values are stored as either a BLOB (if they come in as a
@@ -53,11 +62,13 @@ id INTEGER PRIMARY KEY,
 event_id INTEGER NOT NULL, -- an event ID that contains a tag.
 name TEXT, -- the tag name ("p", "e", whatever)
 value TEXT, -- the tag value, if not hex.
-value_hex BLOB, -- the tag value, if it can be interpreted as a hex string.
+value_hex BLOB, -- the tag value, if it can be interpreted as a lowercase hex string.
 FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
 );
 CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
 CREATE INDEX IF NOT EXISTS tag_val_hex_index ON tag(value_hex);
+CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value_hex,value);
+CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value_hex);

 -- NIP-05 User Validation
 CREATE TABLE IF NOT EXISTS user_verification (
@@ -71,19 +82,37 @@ FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CAS
 );
 CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name);
 CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);
-"##;
+"##,
+    DB_VERSION
+);

 /// Determine the current application database schema version.
-pub fn db_version(conn: &mut Connection) -> Result<usize> {
+pub fn curr_db_version(conn: &mut Connection) -> Result<usize> {
     let query = "PRAGMA user_version;";
     let curr_version = conn.query_row(query, [], |row| row.get(0))?;
     Ok(curr_version)
 }

+fn mig_init(conn: &mut PooledConnection) -> Result<usize> {
+    match conn.execute_batch(INIT_SQL) {
+        Ok(()) => {
+            info!(
+                "database pragma/schema initialized to v{}, and ready",
+                DB_VERSION
+            );
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be initialized");
+        }
+    }
+    Ok(DB_VERSION)
+}
+
 /// Upgrade DB to latest version, and execute pragma settings
 pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
     // check the version.
-    let mut curr_version = db_version(conn)?;
+    let mut curr_version = curr_db_version(conn)?;
     info!("DB version = {:?}", curr_version);

     debug!(
@@ -99,40 +128,101 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
         (conn.limit(Limit::SQLITE_LIMIT_SQL_LENGTH) as f64 / (1024 * 1024) as f64).floor()
     );

-    // initialize from scratch
-    if curr_version == 0 {
-        match conn.execute_batch(INIT_SQL) {
-            Ok(()) => {
-                info!("database pragma/schema initialized to v4, and ready");
-            }
-            Err(err) => {
-                error!("update failed: {}", err);
-                panic!("database could not be initialized");
-            }
-        }
-    }
-    if curr_version == 1 {
-        // only change is adding a hidden column to events.
-        let upgrade_sql = r##"
+    match curr_version.cmp(&DB_VERSION) {
+        // Database is new or not current
+        Ordering::Less => {
+            // initialize from scratch
+            if curr_version == 0 {
+                curr_version = mig_init(conn)?;
+            }
+            // for initialized but out-of-date schemas, proceed to
+            // upgrade sequentially until we are current.
+            if curr_version == 1 {
+                curr_version = mig_1_to_2(conn)?;
+            }
+            if curr_version == 2 {
+                curr_version = mig_2_to_3(conn)?;
+            }
+            if curr_version == 3 {
+                curr_version = mig_3_to_4(conn)?;
+            }
+            if curr_version == 4 {
+                curr_version = mig_4_to_5(conn)?;
+            }
+            if curr_version == 5 {
+                curr_version = mig_5_to_6(conn)?;
+            }
+            if curr_version == 6 {
+                curr_version = mig_6_to_7(conn)?;
+            }
+            if curr_version == 7 {
+                curr_version = mig_7_to_8(conn)?;
+            }
+            if curr_version == 8 {
+                curr_version = mig_8_to_9(conn)?;
+            }
+            if curr_version == 9 {
+                curr_version = mig_9_to_10(conn)?;
+            }
+            if curr_version == 10 {
+                curr_version = mig_10_to_11(conn)?;
+            }
+            if curr_version == DB_VERSION {
+                info!(
+                    "All migration scripts completed successfully. Welcome to v{}.",
+                    DB_VERSION
+                );
+            }
+        }
+        // Database is current, all is good
+        Ordering::Equal => {
+            debug!("Database version was already current (v{})", DB_VERSION);
+        }
+        // Database is newer than what this code understands, abort
+        Ordering::Greater => {
+            panic!(
+                "Database version is newer than supported by this executable (v{} > v{})",
+                curr_version, DB_VERSION
+            );
+        }
+    }
+
+    // Setup PRAGMA
+    conn.execute_batch(STARTUP_SQL)?;
+    debug!("SQLite PRAGMA startup completed");
+    Ok(())
+}
+
+//// Migration Scripts
+
+fn mig_1_to_2(conn: &mut PooledConnection) -> Result<usize> {
+    // only change is adding a hidden column to events.
+    let upgrade_sql = r##"
 ALTER TABLE event ADD hidden INTEGER;
 UPDATE event SET hidden=FALSE;
 PRAGMA user_version = 2;
 "##;
     match conn.execute_batch(upgrade_sql) {
         Ok(()) => {
             info!("database schema upgraded v1 -> v2");
-            curr_version = 2;
         }
         Err(err) => {
             error!("update failed: {}", err);
             panic!("database could not be upgraded");
         }
     }
-    }
-    if curr_version == 2 {
-        // this version lacks the tag column
-        info!("database schema needs update from 2->3");
-        let upgrade_sql = r##"
+    Ok(2)
+}
+
+fn mig_2_to_3(conn: &mut PooledConnection) -> Result<usize> {
+    // this version lacks the tag column
+    info!("database schema needs update from 2->3");
+    let upgrade_sql = r##"
 CREATE TABLE IF NOT EXISTS tag (
 id INTEGER PRIMARY KEY,
 event_id INTEGER NOT NULL, -- an event ID that contains a tag.
@@ -143,43 +233,43 @@ FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
 );
 PRAGMA user_version = 3;
 "##;
     // TODO: load existing refs into tag table
     match conn.execute_batch(upgrade_sql) {
         Ok(()) => {
             info!("database schema upgraded v2 -> v3");
-            curr_version = 3;
         }
         Err(err) => {
             error!("update failed: {}", err);
             panic!("database could not be upgraded");
         }
     }
-    info!("Starting transaction");
     // iterate over every event/pubkey tag
     let tx = conn.transaction()?;
     {
         let mut stmt = tx.prepare("select event_id, \"e\", lower(hex(referenced_event)) from event_ref union select event_id, \"p\", lower(hex(referenced_pubkey)) from pubkey_ref;")?;
         let mut tag_rows = stmt.query([])?;
         while let Some(row) = tag_rows.next()? {
             // we want to capture the event_id that had the tag, the tag name, and the tag hex value.
             let event_id: u64 = row.get(0)?;
             let tag_name: String = row.get(1)?;
             let tag_value: String = row.get(2)?;
             // this will leave behind p/e tags that were non-hex, but they are invalid anyways.
-            if is_hex(&tag_value) {
+            if is_lower_hex(&tag_value) {
                 tx.execute(
                     "INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
                     params![event_id, tag_name, hex::decode(&tag_value).ok()],
                 )?;
             }
         }
     }
+    info!("Updated tag values");
     tx.commit()?;
-    info!("Upgrade complete");
-    }
-    if curr_version == 3 {
-        info!("database schema needs update from 3->4");
-        let upgrade_sql = r##"
+    Ok(3)
+}
+
+fn mig_3_to_4(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 3->4");
+    let upgrade_sql = r##"
 -- incoming metadata events with nip05
 CREATE TABLE IF NOT EXISTS user_verification (
 id INTEGER PRIMARY KEY,
@@ -194,44 +284,188 @@ CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(nam
 CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);
 PRAGMA user_version = 4;
 "##;
     match conn.execute_batch(upgrade_sql) {
         Ok(()) => {
             info!("database schema upgraded v3 -> v4");
-            curr_version = 4;
         }
         Err(err) => {
             error!("update failed: {}", err);
             panic!("database could not be upgraded");
         }
     }
-    }
-
-    if curr_version == 4 {
-        info!("database schema needs update from 4->5");
-        let upgrade_sql = r##"
+    Ok(4)
+}
+
+fn mig_4_to_5(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 4->5");
+    let upgrade_sql = r##"
 DROP TABLE IF EXISTS event_ref;
 DROP TABLE IF EXISTS pubkey_ref;
 PRAGMA user_version=5;
 "##;
-        match conn.execute_batch(upgrade_sql) {
-            Ok(()) => {
-                info!("database schema upgraded v4 -> v5");
-                // uncomment if we have a newer version
-                //curr_version = 5;
-            }
-            Err(err) => {
-                error!("update failed: {}", err);
-                panic!("database could not be upgraded");
-            }
-        }
-    } else if curr_version == 5 {
-        debug!("Database version was already current");
-    } else if curr_version > 5 {
-        panic!("Database version is newer than supported by this executable");
-    }
-
-    // Setup PRAGMA
-    conn.execute_batch(STARTUP_SQL)?;
-    debug!("SQLite PRAGMA startup completed");
-    Ok(())
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v4 -> v5");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(5)
+}
+
+fn mig_5_to_6(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 5->6");
+    // We need to rebuild the tags table. iterate through the
+    // event table. build event from json, insert tags into a
+    // fresh tag table. This was needed due to a logic error in
+    // how hex-like tags got indexed.
+    let start = Instant::now();
+    let tx = conn.transaction()?;
+    {
+        // Clear out table
+        tx.execute("DELETE FROM tag;", [])?;
+        let mut stmt = tx.prepare("select id, content from event order by id;")?;
+        let mut tag_rows = stmt.query([])?;
+        while let Some(row) = tag_rows.next()? {
+            // we want to capture the event_id that had the tag, the tag name, and the tag hex value.
+            let event_id: u64 = row.get(0)?;
+            let event_json: String = row.get(1)?;
+            let event: Event = serde_json::from_str(&event_json)?;
+            // look at each event, and each tag, creating new tag entries if appropriate.
+            for t in event.tags.iter().filter(|x| x.len() > 1) {
+                let tagname = t.get(0).unwrap();
+                let tagnamechar_opt = single_char_tagname(tagname);
+                if tagnamechar_opt.is_none() {
+                    continue;
+                }
+                // safe because len was > 1
+                let tagval = t.get(1).unwrap();
+                // insert as BLOB if we can restore it losslessly.
+                // this means it needs to be even length and lowercase.
+                if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
+                    tx.execute(
+                        "INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
+                        params![event_id, tagname, hex::decode(tagval).ok()],
+                    )?;
+                } else {
+                    // otherwise, insert as text
+                    tx.execute(
+                        "INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
+                        params![event_id, tagname, &tagval],
+                    )?;
+                }
+            }
+        }
+        tx.execute("PRAGMA user_version = 6;", [])?;
+    }
+    tx.commit()?;
+    info!("database schema upgraded v5 -> v6 in {:?}", start.elapsed());
+    // vacuum after large table modification
+    let start = Instant::now();
+    conn.execute("VACUUM;", [])?;
+    info!("vacuumed DB after tags rebuild in {:?}", start.elapsed());
+    Ok(6)
+}
+
+fn mig_6_to_7(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 6->7");
+    // only change is adding a hidden column to events.
+    let upgrade_sql = r##"
+ALTER TABLE event ADD delegated_by BLOB;
+CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
+PRAGMA user_version = 7;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v6 -> v7");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(7)
+}
+
+fn mig_7_to_8(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 7->8");
+    // Remove redundant indexes, and add a better multi-column index.
+    let upgrade_sql = r##"
+DROP INDEX IF EXISTS created_at_index;
+DROP INDEX IF EXISTS kind_index;
+CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
+PRAGMA user_version = 8;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v7 -> v8");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(8)
+}
+
+fn mig_8_to_9(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 8->9");
+    // Those old indexes were actually helpful...
+    let upgrade_sql = r##"
+CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
+CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
+PRAGMA user_version = 9;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v8 -> v9");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(9)
+}
+
+fn mig_9_to_10(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 9->10");
+    // Those old indexes were actually helpful...
+    let upgrade_sql = r##"
+CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value_hex,value);
+PRAGMA user_version = 10;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v9 -> v10");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(10)
+}
+
+fn mig_10_to_11(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 10->11");
+    // Those old indexes were actually helpful...
+    let upgrade_sql = r##"
+CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value_hex);
+reindex;
+pragma optimize;
+PRAGMA user_version = 11;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v10 -> v11");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(11)
 }
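Each migration both applies its DDL and bumps `PRAGMA user_version`, returning the new version so `upgrade_db` can chain them until `DB_VERSION` is reached. A hypothetical next migration would presumably follow the same template (sketch only; `mig_11_to_12` is not part of this commit):

    // Hypothetical future migration following the template above.
    fn mig_11_to_12(conn: &mut PooledConnection) -> Result<usize> {
        info!("database schema needs update from 11->12");
        let upgrade_sql = r##"
    -- new DDL statements would go here
    PRAGMA user_version = 12;
    "##;
        match conn.execute_batch(upgrade_sql) {
            Ok(()) => {
                info!("database schema upgraded v11 -> v12");
            }
            Err(err) => {
                error!("update failed: {}", err);
                panic!("database could not be upgraded");
            }
        }
        Ok(12)
    }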
|
719
src/server.rs
Normal file
719
src/server.rs
Normal file
@@ -0,0 +1,719 @@
|
|||||||
|
//! Server process
|
||||||
|
use crate::close::Close;
|
||||||
|
use crate::close::CloseCmd;
|
||||||
|
use crate::config::{Settings, VerifiedUsersMode};
|
||||||
|
use crate::conn;
|
||||||
|
use crate::db;
|
||||||
|
use crate::db::SubmittedEvent;
|
||||||
|
use crate::error::{Error, Result};
|
||||||
|
use crate::event::Event;
|
||||||
|
use crate::event::EventCmd;
|
||||||
|
use crate::info::RelayInfo;
|
||||||
|
use crate::nip05;
|
||||||
|
use crate::notice::Notice;
|
||||||
|
use crate::subscription::Subscription;
|
||||||
|
use futures::SinkExt;
|
||||||
|
use futures::StreamExt;
|
||||||
|
use governor::{Jitter, Quota, RateLimiter};
|
||||||
|
use http::header::HeaderMap;
|
||||||
|
use hyper::header::ACCEPT;
|
||||||
|
use hyper::service::{make_service_fn, service_fn};
|
||||||
|
use hyper::upgrade::Upgraded;
|
||||||
|
use hyper::{
|
||||||
|
header, server::conn::AddrStream, upgrade, Body, Request, Response, Server, StatusCode,
|
||||||
|
};
|
||||||
|
use rusqlite::OpenFlags;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_json::json;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::convert::Infallible;
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
use std::path::Path;
|
||||||
|
use std::sync::mpsc::Receiver as MpscReceiver;
|
||||||
|
use std::time::Duration;
|
||||||
|
use std::time::Instant;
|
||||||
|
use tokio::runtime::Builder;
|
||||||
|
use tokio::sync::broadcast::{self, Receiver, Sender};
|
||||||
|
use tokio::sync::mpsc;
|
||||||
|
use tokio::sync::oneshot;
|
||||||
|
use tokio_tungstenite::WebSocketStream;
|
||||||
|
use tracing::*;
|
||||||
|
use tungstenite::error::CapacityError::MessageTooLong;
|
||||||
|
use tungstenite::error::Error as WsError;
|
||||||
|
use tungstenite::handshake;
|
||||||
|
use tungstenite::protocol::Message;
|
||||||
|
use tungstenite::protocol::WebSocketConfig;
|
||||||
|
|
||||||
|
/// Handle arbitrary HTTP requests, including for WebSocket upgrades.
|
||||||
|
async fn handle_web_request(
|
||||||
|
mut request: Request<Body>,
|
||||||
|
pool: db::SqlitePool,
|
||||||
|
settings: Settings,
|
||||||
|
remote_addr: SocketAddr,
|
||||||
|
broadcast: Sender<Event>,
|
||||||
|
event_tx: tokio::sync::mpsc::Sender<SubmittedEvent>,
|
||||||
|
shutdown: Receiver<()>,
|
||||||
|
) -> Result<Response<Body>, Infallible> {
|
||||||
|
match (
|
||||||
|
request.uri().path(),
|
||||||
|
request.headers().contains_key(header::UPGRADE),
|
||||||
|
) {
|
||||||
|
// Request for / as websocket
|
||||||
|
("/", true) => {
|
||||||
|
trace!("websocket with upgrade request");
|
||||||
|
//assume request is a handshake, so create the handshake response
|
||||||
|
let response = match handshake::server::create_response_with_body(&request, || {
|
||||||
|
Body::empty()
|
||||||
|
}) {
|
||||||
|
Ok(response) => {
|
||||||
|
//in case the handshake response creation succeeds,
|
||||||
|
//spawn a task to handle the websocket connection
|
||||||
|
tokio::spawn(async move {
|
||||||
|
//using the hyper feature of upgrading a connection
|
||||||
|
match upgrade::on(&mut request).await {
|
||||||
|
//if successfully upgraded
|
||||||
|
Ok(upgraded) => {
|
||||||
|
// set WebSocket configuration options
|
||||||
|
let config = WebSocketConfig {
|
||||||
|
max_message_size: settings.limits.max_ws_message_bytes,
|
||||||
|
max_frame_size: settings.limits.max_ws_frame_bytes,
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
//create a websocket stream from the upgraded object
|
||||||
|
let ws_stream = WebSocketStream::from_raw_socket(
|
||||||
|
//pass the upgraded object
|
||||||
|
//as the base layer stream of the Websocket
|
||||||
|
upgraded,
|
||||||
|
tokio_tungstenite::tungstenite::protocol::Role::Server,
|
||||||
|
Some(config),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
let user_agent = get_header_string("user-agent", request.headers());
|
||||||
|
// determine the remote IP from headers if the exist
|
||||||
|
let header_ip = settings
|
||||||
|
.network
|
||||||
|
.remote_ip_header
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|x| get_header_string(x, request.headers()));
|
||||||
|
// use the socket addr as a backup
|
||||||
|
let remote_ip =
|
||||||
|
header_ip.unwrap_or_else(|| remote_addr.ip().to_string());
|
||||||
|
let client_info = ClientInfo {
|
||||||
|
remote_ip,
|
||||||
|
user_agent,
|
||||||
|
};
|
||||||
|
// spawn a nostr server with our websocket
|
||||||
|
tokio::spawn(nostr_server(
|
||||||
|
pool,
|
||||||
|
client_info,
|
||||||
|
settings,
|
||||||
|
ws_stream,
|
||||||
|
broadcast,
|
||||||
|
event_tx,
|
||||||
|
shutdown,
|
||||||
|
));
|
||||||
|
}
|
||||||
|
// todo: trace, don't print...
|
||||||
|
Err(e) => println!(
|
||||||
|
"error when trying to upgrade connection \
|
||||||
|
from address {} to websocket connection. \
|
||||||
|
Error is: {}",
|
||||||
|
remote_addr, e
|
||||||
|
),
|
||||||
|
}
|
||||||
|
});
|
||||||
|
//return the response to the handshake request
|
||||||
|
response
|
||||||
|
}
|
||||||
|
Err(error) => {
|
||||||
|
warn!("websocket response failed");
|
||||||
|
let mut res =
|
||||||
|
Response::new(Body::from(format!("Failed to create websocket: {}", error)));
|
||||||
|
*res.status_mut() = StatusCode::BAD_REQUEST;
|
||||||
|
return Ok(res);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
Ok::<_, Infallible>(response)
|
||||||
|
}
|
||||||
|
// Request for Relay info
|
||||||
|
("/", false) => {
|
||||||
|
// handle request at root with no upgrade header
|
||||||
|
// Check if this is a nostr server info request
|
||||||
|
let accept_header = &request.headers().get(ACCEPT);
|
||||||
|
// check if application/nostr+json is included
|
||||||
|
if let Some(media_types) = accept_header {
|
||||||
|
if let Ok(mt_str) = media_types.to_str() {
|
||||||
|
if mt_str.contains("application/nostr+json") {
|
||||||
|
// build a relay info response
|
||||||
|
debug!("Responding to server info request");
|
||||||
|
let rinfo = RelayInfo::from(settings.info);
|
||||||
|
let b = Body::from(serde_json::to_string_pretty(&rinfo).unwrap());
|
||||||
|
return Ok(Response::builder()
|
||||||
|
.status(200)
|
||||||
|
.header("Content-Type", "application/nostr+json")
|
||||||
|
.header("Access-Control-Allow-Origin", "*")
|
||||||
|
.body(b)
|
||||||
|
.unwrap());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(Response::builder()
|
||||||
|
.status(200)
|
||||||
|
.header("Content-Type", "text/plain")
|
||||||
|
.body(Body::from("Please use a Nostr client to connect."))
|
||||||
|
.unwrap())
|
||||||
|
}
|
||||||
|
(_, _) => {
|
||||||
|
//handle any other url
|
||||||
|
Ok(Response::builder()
|
||||||
|
.status(StatusCode::NOT_FOUND)
|
||||||
|
.body(Body::from("Nothing here."))
|
||||||
|
.unwrap())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_header_string(header: &str, headers: &HeaderMap) -> Option<String> {
|
||||||
|
headers
|
||||||
|
.get(header)
|
||||||
|
.and_then(|x| x.to_str().ok().map(|x| x.to_string()))
|
||||||
|
}
|
||||||
|
|
||||||
|
// return on a control-c or internally requested shutdown signal
|
||||||
|
async fn ctrl_c_or_signal(mut shutdown_signal: Receiver<()>) {
|
||||||
|
let mut term_signal = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
|
||||||
|
.expect("could not define signal");
|
||||||
|
loop {
|
||||||
|
tokio::select! {
|
||||||
|
_ = shutdown_signal.recv() => {
|
||||||
|
info!("Shutting down webserver as requested");
|
||||||
|
// server shutting down, exit loop
|
||||||
|
break;
|
||||||
|
},
|
||||||
|
_ = tokio::signal::ctrl_c() => {
|
||||||
|
info!("Shutting down webserver due to SIGINT");
|
||||||
|
break;
|
||||||
|
},
|
||||||
|
_ = term_signal.recv() => {
|
||||||
|
info!("Shutting down webserver due to SIGTERM");
|
||||||
|
break;
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Start running a Nostr relay server.
|
||||||
|
pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result<(), Error> {
|
||||||
|
trace!("Config: {:?}", settings);
|
||||||
|
// do some config validation.
|
||||||
|
if !Path::new(&settings.database.data_directory).is_dir() {
|
||||||
|
error!("Database directory does not exist");
|
||||||
|
return Err(Error::DatabaseDirError);
|
||||||
|
}
|
||||||
|
let addr = format!(
|
||||||
|
"{}:{}",
|
||||||
|
settings.network.address.trim(),
|
||||||
|
settings.network.port
|
||||||
|
);
|
||||||
|
let socket_addr = addr.parse().expect("listening address not valid");
|
||||||
|
// address whitelisting settings
|
||||||
|
if let Some(addr_whitelist) = &settings.authorization.pubkey_whitelist {
|
||||||
|
info!(
|
||||||
|
"Event publishing restricted to {} pubkey(s)",
|
||||||
|
addr_whitelist.len()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
// check if NIP-05 enforced user verification is on
|
||||||
|
if settings.verified_users.is_active() {
|
||||||
|
info!(
|
||||||
|
"NIP-05 user verification mode:{:?}",
|
||||||
|
settings.verified_users.mode
|
||||||
|
);
|
||||||
|
if let Some(d) = settings.verified_users.verify_update_duration() {
|
||||||
|
info!("NIP-05 check user verification every: {:?}", d);
|
||||||
|
}
|
||||||
|
if let Some(d) = settings.verified_users.verify_expiration_duration() {
|
||||||
|
info!("NIP-05 user verification expires after: {:?}", d);
|
||||||
|
}
|
||||||
|
if let Some(wl) = &settings.verified_users.domain_whitelist {
|
||||||
|
info!("NIP-05 domain whitelist: {:?}", wl);
|
||||||
|
}
|
||||||
|
if let Some(bl) = &settings.verified_users.domain_blacklist {
|
||||||
|
info!("NIP-05 domain blacklist: {:?}", bl);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// configure tokio runtime
|
||||||
|
let rt = Builder::new_multi_thread()
|
||||||
|
.enable_all()
|
||||||
|
.thread_name("tokio-ws")
|
||||||
|
// limit concurrent SQLite blocking threads
|
||||||
|
.max_blocking_threads(settings.limits.max_blocking_threads)
|
||||||
|
.on_thread_start(|| {
|
||||||
|
trace!("started new thread");
|
||||||
|
})
|
||||||
|
.on_thread_stop(|| {
|
||||||
|
trace!("stopping thread");
|
||||||
|
})
|
||||||
|
.build()
|
||||||
|
.unwrap();
|
||||||
|
// start tokio
|
||||||
|
rt.block_on(async {
|
||||||
|
let broadcast_buffer_limit = settings.limits.broadcast_buffer;
|
||||||
|
let persist_buffer_limit = settings.limits.event_persist_buffer;
|
||||||
|
let verified_users_active = settings.verified_users.is_active();
|
||||||
|
let db_min_conn = settings.database.min_conn;
|
||||||
|
let db_max_conn = settings.database.max_conn;
|
||||||
|
let settings = settings.clone();
|
||||||
|
info!("listening on: {}", socket_addr);
|
||||||
|
// all client-submitted valid events are broadcast to every
|
||||||
|
// other client on this channel. This should be large enough
|
||||||
|
// to accomodate slower readers (messages are dropped if
|
||||||
|
// clients can not keep up).
|
||||||
|
let (bcast_tx, _) = broadcast::channel::<Event>(broadcast_buffer_limit);
|
||||||
|
// validated events that need to be persisted are sent to the
|
||||||
|
// database on via this channel.
|
||||||
|
let (event_tx, event_rx) = mpsc::channel::<SubmittedEvent>(persist_buffer_limit);
|
||||||
|
// establish a channel for letting all threads now about a
|
||||||
|
// requested server shutdown.
|
||||||
|
let (invoke_shutdown, shutdown_listen) = broadcast::channel::<()>(1);
|
||||||
|
// create a channel for sending any new metadata event. These
|
||||||
|
// will get processed relatively slowly (a potentially
|
||||||
|
// multi-second blocking HTTP call) on a single thread, so we
|
||||||
|
// buffer requests on the channel. No harm in dropping events
|
||||||
|
// here, since we are protecting against DoS. This can make
|
||||||
|
// it difficult to setup initial metadata in bulk, since
|
||||||
|
// overwhelming this will drop events and won't register
|
||||||
|
// metadata events.
|
||||||
|
let (metadata_tx, metadata_rx) = broadcast::channel::<Event>(4096);
|
||||||
|
// start the database writer thread. Give it a channel for
|
||||||
|
// writing events, and for publishing events that have been
|
||||||
|
// written (to all connected clients).
|
||||||
|
db::db_writer(
|
||||||
|
settings.clone(),
|
||||||
|
event_rx,
|
||||||
|
bcast_tx.clone(),
|
||||||
|
metadata_tx.clone(),
|
||||||
|
shutdown_listen,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
info!("db writer created");
|
||||||
|
|
||||||
|
// create a nip-05 verifier thread; if enabled.
|
||||||
|
if settings.verified_users.mode != VerifiedUsersMode::Disabled {
|
||||||
|
let verifier_opt =
|
||||||
|
nip05::Verifier::new(metadata_rx, bcast_tx.clone(), settings.clone());
|
||||||
|
if let Ok(mut v) = verifier_opt {
|
||||||
|
if verified_users_active {
|
||||||
|
tokio::task::spawn(async move {
|
||||||
|
info!("starting up NIP-05 verifier...");
|
||||||
|
v.run().await;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// build a connection pool for DB maintenance
|
||||||
|
let maintenance_pool = db::build_pool(
|
||||||
|
"maintenance writer",
|
||||||
|
&settings,
|
||||||
|
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
|
||||||
|
1,
|
||||||
|
1,
|
||||||
|
false,
|
||||||
|
);
|
||||||
|
db::db_maintenance(maintenance_pool).await;
|
||||||
|
|
||||||
|
// listen for (external to tokio) shutdown request
|
||||||
|
let controlled_shutdown = invoke_shutdown.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
info!("control message listener started");
|
||||||
|
match shutdown_rx.recv() {
|
||||||
|
Ok(()) => {
|
||||||
|
info!("control message requesting shutdown");
|
||||||
|
controlled_shutdown.send(()).ok();
|
||||||
|
}
|
||||||
|
Err(std::sync::mpsc::RecvError) => {
|
||||||
|
// FIXME: spurious error on startup?
|
||||||
|
debug!("shutdown requestor is disconnected");
|
||||||
|
}
|
||||||
|
};
|
||||||
|
});
|
||||||
|
// listen for ctrl-c interruupts
|
||||||
|
let ctrl_c_shutdown = invoke_shutdown.clone();
|
||||||
|
// listener for webserver shutdown
|
||||||
|
let webserver_shutdown_listen = invoke_shutdown.subscribe();
|
||||||
|
|
||||||
|
tokio::spawn(async move {
|
||||||
|
tokio::signal::ctrl_c().await.unwrap();
|
||||||
|
info!("shutting down due to SIGINT (main)");
|
||||||
|
ctrl_c_shutdown.send(()).ok();
|
||||||
|
});
|
||||||
|
// build a connection pool for sqlite connections
|
||||||
|
let pool = db::build_pool(
|
||||||
|
"client query",
|
||||||
|
&settings,
|
||||||
|
rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY,
|
||||||
|
db_min_conn,
|
||||||
|
db_max_conn,
|
||||||
|
true,
|
||||||
|
);
|
||||||
|
// A `Service` is needed for every connection, so this
|
||||||
|
// creates one from our `handle_request` function.
|
||||||
|
let make_svc = make_service_fn(|conn: &AddrStream| {
|
||||||
|
let svc_pool = pool.clone();
|
||||||
|
let remote_addr = conn.remote_addr();
|
||||||
|
let bcast = bcast_tx.clone();
|
||||||
|
let event = event_tx.clone();
|
||||||
|
let stop = invoke_shutdown.clone();
|
||||||
|
let settings = settings.clone();
|
||||||
|
async move {
|
||||||
|
// service_fn converts our function into a `Service`
|
||||||
|
Ok::<_, Infallible>(service_fn(move |request: Request<Body>| {
|
||||||
|
handle_web_request(
|
||||||
|
request,
|
||||||
|
svc_pool.clone(),
|
||||||
|
settings.clone(),
|
||||||
|
remote_addr,
|
||||||
|
bcast.clone(),
|
||||||
|
event.clone(),
|
||||||
|
stop.subscribe(),
|
||||||
|
)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
});
|
||||||
|
let server = Server::bind(&socket_addr)
|
||||||
|
.serve(make_svc)
|
||||||
|
.with_graceful_shutdown(ctrl_c_or_signal(webserver_shutdown_listen));
|
||||||
|
// run hyper in this thread. This is why the thread does not return.
|
||||||
|
if let Err(e) = server.await {
|
||||||
|
eprintln!("server error: {}", e);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Nostr protocol messages from a client
|
||||||
|
#[derive(Deserialize, Serialize, Clone, PartialEq, Eq, Debug)]
|
||||||
|
#[serde(untagged)]
|
||||||
|
pub enum NostrMessage {
|
||||||
|
/// An `EVENT` message
|
||||||
|
EventMsg(EventCmd),
|
||||||
|
/// A `REQ` message
|
||||||
|
SubMsg(Subscription),
|
||||||
|
/// A `CLOSE` message
|
||||||
|
CloseMsg(CloseCmd),
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Convert Message to NostrMessage
|
||||||
|
fn convert_to_msg(msg: String, max_bytes: Option<usize>) -> Result<NostrMessage> {
|
||||||
|
let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
|
||||||
|
match parsed_res {
|
||||||
|
Ok(m) => {
|
||||||
|
if let NostrMessage::EventMsg(_) = m {
|
||||||
|
if let Some(max_size) = max_bytes {
|
||||||
|
// check length, ensure that some max size is set.
|
||||||
|
if msg.len() > max_size && max_size > 0 {
|
||||||
|
return Err(Error::EventMaxLengthError(msg.len()));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(m)
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
debug!("proto parse error: {:?}", e);
|
||||||
|
debug!("parse error on message: {}", msg.trim());
|
||||||
|
Err(Error::ProtoParseError)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Turn a string into a NOTICE message ready to send over a WebSocket
|
||||||
|
fn make_notice_message(notice: Notice) -> Message {
|
||||||
|
let json = match notice {
|
||||||
|
Notice::Message(ref msg) => json!(["NOTICE", msg]),
|
||||||
|
Notice::EventResult(ref res) => json!(["OK", res.id, res.status.to_bool(), res.msg]),
|
||||||
|
};
|
||||||
|
|
||||||
|
Message::text(json.to_string())
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ClientInfo {
|
||||||
|
remote_ip: String,
|
||||||
|
user_agent: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Handle new client connections. This runs through an event loop
|
||||||
|
/// for all client communication.
|
||||||
|
async fn nostr_server(
|
||||||
|
pool: db::SqlitePool,
|
||||||
|
client_info: ClientInfo,
|
||||||
|
settings: Settings,
|
||||||
|
mut ws_stream: WebSocketStream<Upgraded>,
|
||||||
|
broadcast: Sender<Event>,
|
||||||
|
event_tx: mpsc::Sender<SubmittedEvent>,
|
||||||
|
mut shutdown: Receiver<()>,
|
||||||
|
) {
|
||||||
|
// the time this websocket nostr server started
|
||||||
|
let orig_start = Instant::now();
|
||||||
|
// get a broadcast channel for clients to communicate on
|
||||||
|
let mut bcast_rx = broadcast.subscribe();
|
||||||
|
// Track internal client state
|
||||||
|
let mut conn = conn::ClientConn::new(client_info.remote_ip);
|
||||||
|
// subscription creation rate limiting
|
||||||
|
let mut sub_lim_opt = None;
|
||||||
|
// 100ms jitter when the rate limiter returns
|
||||||
|
let jitter = Jitter::up_to(Duration::from_millis(100));
|
||||||
|
let sub_per_min_setting = settings.limits.subscriptions_per_min;
|
||||||
|
if let Some(sub_per_min) = sub_per_min_setting {
|
||||||
|
if sub_per_min > 0 {
|
||||||
|
trace!("Rate limits for sub creation ({}/min)", sub_per_min);
|
||||||
|
let quota_time = core::num::NonZeroU32::new(sub_per_min).unwrap();
|
||||||
|
let quota = Quota::per_minute(quota_time);
|
||||||
|
sub_lim_opt = Some(RateLimiter::direct(quota));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Use the remote IP as the client identifier
|
||||||
|
let cid = conn.get_client_prefix();
|
||||||
|
// Create a channel for receiving query results from the database.
|
||||||
|
// we will send out the tx handle to any query we generate.
|
||||||
|
// this has capacity for some of the larger requests we see, which
|
||||||
|
// should allow the DB thread to release the handle earlier.
|
||||||
|
let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(20000);
|
||||||
|
// Create channel for receiving NOTICEs
|
||||||
|
let (notice_tx, mut notice_rx) = mpsc::channel::<Notice>(128);
|
||||||
|
|
||||||
|
// last time this client sent data (message, ping, etc.)
|
||||||
|
let mut last_message_time = Instant::now();
|
||||||
|
|
||||||
|
// ping interval (every 5 minutes)
|
||||||
|
let default_ping_dur = Duration::from_secs(settings.network.ping_interval_seconds.into());
|
||||||
|
|
||||||
|
// disconnect after 20 minutes without a ping response or event.
|
||||||
|
let max_quiet_time = Duration::from_secs(60 * 20);
|
||||||
|
|
||||||
|
let start = tokio::time::Instant::now() + default_ping_dur;
|
||||||
|
let mut ping_interval = tokio::time::interval_at(start, default_ping_dur);
|
||||||
|
|
||||||
|
// maintain a hashmap of a oneshot channel for active subscriptions.
|
||||||
|
// when these subscriptions are cancelled, make a message
|
||||||
|
// available to the executing query so it knows to stop.
|
||||||
|
let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
|
||||||
|
// for stats, keep track of how many events the client published,
|
||||||
|
// and how many it received from queries.
|
||||||
|
let mut client_published_event_count: usize = 0;
|
||||||
|
let mut client_received_event_count: usize = 0;
|
||||||
|
debug!("new client connection (cid: {}, ip: {:?})", cid, conn.ip());
|
||||||
|
if let Some(ua) = client_info.user_agent {
|
||||||
|
debug!("cid: {}, user-agent: {:?}", cid, ua);
|
||||||
|
}
|
||||||
|
loop {
|
||||||
|
tokio::select! {
|
||||||
|
_ = shutdown.recv() => {
|
||||||
|
info!("Close connection down due to shutdown, client: {}, ip: {:?}, connected: {:?}", cid, conn.ip(), orig_start.elapsed());
|
||||||
|
// server shutting down, exit loop
|
||||||
|
break;
|
||||||
|
},
|
||||||
|
_ = ping_interval.tick() => {
|
||||||
|
// check how long since we talked to client
|
||||||
|
// if it has been too long, disconnect
|
||||||
|
if last_message_time.elapsed() > max_quiet_time {
|
||||||
|
debug!("ending connection due to lack of client ping response");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
// Send a ping
|
||||||
|
ws_stream.send(Message::Ping(Vec::new())).await.ok();
|
||||||
|
},
|
||||||
|
Some(notice_msg) = notice_rx.recv() => {
|
||||||
|
ws_stream.send(make_notice_message(notice_msg)).await.ok();
|
||||||
|
},
|
||||||
|
Some(query_result) = query_rx.recv() => {
|
||||||
|
// database informed us of a query result we asked for
|
||||||
|
let subesc = query_result.sub_id.replace('"', "");
|
||||||
|
if query_result.event == "EOSE" {
|
||||||
|
let send_str = format!("[\"EOSE\",\"{}\"]", subesc);
|
||||||
|
ws_stream.send(Message::Text(send_str)).await.ok();
|
||||||
|
} else {
|
||||||
|
client_received_event_count += 1;
|
||||||
|
// send a result
|
||||||
|
let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
|
||||||
|
ws_stream.send(Message::Text(send_str)).await.ok();
|
||||||
|
}
|
||||||
|
},
|
||||||
|
// TODO: consider logging the LaggedRecv error
|
||||||
|
Ok(global_event) = bcast_rx.recv() => {
|
||||||
|
// an event has been broadcast to all clients
|
||||||
|
// first check if there is a subscription for this event.
|
||||||
|
for (s, sub) in conn.subscriptions() {
|
||||||
|
if !sub.interested_in_event(&global_event) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: serialize at broadcast time, instead of
|
||||||
|
// once for each consumer.
|
||||||
|
if let Ok(event_str) = serde_json::to_string(&global_event) {
|
||||||
|
trace!("sub match for client: {}, sub: {:?}, event: {:?}",
|
||||||
|
cid, s,
|
||||||
|
global_event.get_event_id_prefix());
|
||||||
|
// create an event response and send it
|
||||||
|
let subesc = s.replace('"', "");
|
||||||
|
ws_stream.send(Message::Text(format!("[\"EVENT\",\"{}\",{}]", subesc, event_str))).await.ok();
|
||||||
|
} else {
|
||||||
|
warn!("could not serialize event: {:?}", global_event.get_event_id_prefix());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
ws_next = ws_stream.next() => {
|
||||||
|
// update most recent message time for client
|
||||||
|
last_message_time = Instant::now();
|
||||||
|
// Consume text messages from the client, parse into Nostr messages.
|
||||||
|
let nostr_msg = match ws_next {
|
||||||
|
Some(Ok(Message::Text(m))) => {
|
||||||
|
convert_to_msg(m,settings.limits.max_event_bytes)
|
||||||
|
},
|
||||||
|
Some(Ok(Message::Binary(_))) => {
|
||||||
|
ws_stream.send(
|
||||||
|
make_notice_message(Notice::message("binary messages are not accepted".into()))).await.ok();
|
||||||
|
continue;
|
||||||
|
},
|
||||||
|
Some(Ok(Message::Ping(_) | Message::Pong(_))) => {
|
||||||
|
// get a ping/pong, ignore. tungstenite will
|
||||||
|
// send responses automatically.
|
||||||
|
continue;
|
||||||
|
},
|
||||||
|
Some(Err(WsError::Capacity(MessageTooLong{size, max_size}))) => {
|
||||||
|
ws_stream.send(
|
||||||
|
make_notice_message(Notice::message(format!("message too large ({} > {})",size, max_size)))).await.ok();
|
||||||
|
continue;
|
||||||
|
},
|
||||||
|
None |
|
||||||
|
Some(Ok(Message::Close(_)) |
|
||||||
|
Err(WsError::AlreadyClosed | WsError::ConnectionClosed |
|
||||||
|
WsError::Protocol(tungstenite::error::ProtocolError::ResetWithoutClosingHandshake)))
|
||||||
|
=> {
|
||||||
|
debug!("websocket close from client (cid: {}, ip: {:?})",cid, conn.ip());
|
||||||
|
break;
|
||||||
|
},
|
||||||
|
Some(Err(WsError::Io(e))) => {
|
||||||
|
// IO errors are considered fatal
|
||||||
|
warn!("IO error (cid: {}, ip: {:?}): {:?}", cid, conn.ip(), e);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
x => {
|
||||||
|
// default condition on error is to close the client connection
|
||||||
|
info!("unknown error (cid: {}, ip: {:?}): {:?} (closing conn)", cid, conn.ip(), x);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// convert ws_next into proto_next
|
||||||
|
match nostr_msg {
|
||||||
|
Ok(NostrMessage::EventMsg(ec)) => {
|
||||||
|
// An EventCmd needs to be validated to be converted into an Event
|
||||||
|
// handle each type of message
|
||||||
|
let evid = ec.event_id().to_owned();
|
||||||
|
let parsed : Result<Event> = Result::<Event>::from(ec);
|
||||||
|
match parsed {
|
||||||
|
Ok(e) => {
|
||||||
|
let id_prefix:String = e.id.chars().take(8).collect();
|
||||||
|
debug!("successfully parsed/validated event: {:?} (cid: {})", id_prefix, cid);
|
||||||
|
// check if the event is too far in the future.
|
||||||
|
if e.is_valid_timestamp(settings.options.reject_future_seconds) {
|
||||||
|
// Write this to the database.
|
||||||
|
let submit_event = SubmittedEvent { event: e.clone(), notice_tx: notice_tx.clone() };
|
||||||
|
event_tx.send(submit_event).await.ok();
|
||||||
|
client_published_event_count += 1;
|
||||||
|
} else {
|
||||||
|
info!("client: {} sent a far future-dated event", cid);
|
||||||
|
if let Some(fut_sec) = settings.options.reject_future_seconds {
|
||||||
|
let msg = format!("The event created_at field is out of the acceptable range (+{}sec) for this relay.",fut_sec);
|
||||||
|
let notice = Notice::invalid(e.id, &msg);
|
||||||
|
ws_stream.send(make_notice_message(notice)).await.ok();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Err(e) => {
|
||||||
|
info!("client sent an invalid event (cid: {})", cid);
|
||||||
|
ws_stream.send(make_notice_message(Notice::invalid(evid, &format!("{}", e)))).await.ok();
|
||||||
|
}
|
+                        }
+                    },
+                    Ok(NostrMessage::SubMsg(s)) => {
+                        debug!("subscription requested (cid: {}, sub: {:?})", cid, s.id);
+                        // subscription handling consists of:
+                        // * check for rate limits
+                        // * registering the subscription so future events can be matched
+                        // * making a channel to cancel the request later
+                        // * sending a request for a SQL query
+                        // Do nothing if the sub already exists.
+                        if !conn.has_subscription(&s) {
+                            if let Some(ref lim) = sub_lim_opt {
+                                lim.until_ready_with_jitter(jitter).await;
+                            }
+                            let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
+                            match conn.subscribe(s.clone()) {
+                                Ok(()) => {
+                                    // when we insert, if there was a previous query running with the same name, cancel it.
+                                    if let Some(previous_query) = running_queries.insert(s.id.to_owned(), abandon_query_tx) {
+                                        previous_query.send(()).ok();
+                                    }
+                                    // start a database query. this spawns a blocking database query on a worker thread.
+                                    db::db_query(s, cid.to_owned(), pool.clone(), query_tx.clone(), abandon_query_rx).await;
+                                },
+                                Err(e) => {
+                                    info!("Subscription error: {} (cid: {}, sub: {:?})", e, cid, s.id);
+                                    ws_stream.send(make_notice_message(Notice::message(format!("Subscription error: {}", e)))).await.ok();
+                                }
+                            }
+                        } else {
+                            info!("client sent duplicate subscription, ignoring (cid: {}, sub: {:?})", cid, s.id);
+                        }
+                    },
+                    Ok(NostrMessage::CloseMsg(cc)) => {
+                        // closing a request simply removes the subscription.
+                        let parsed: Result<Close> = Result::<Close>::from(cc);
+                        if let Ok(c) = parsed {
+                            // check if a query is currently
+                            // running, and remove it if so.
+                            let stop_tx = running_queries.remove(&c.id);
+                            if let Some(tx) = stop_tx {
+                                tx.send(()).ok();
+                            }
+                            // stop checking new events against
+                            // the subscription
+                            conn.unsubscribe(&c);
+                        } else {
+                            info!("invalid command ignored");
+                            ws_stream.send(make_notice_message(Notice::message("could not parse command".into()))).await.ok();
+                        }
+                    },
+                    Err(Error::ConnError) => {
+                        debug!("got connection close/error, disconnecting cid: {}, ip: {:?}", cid, conn.ip());
+                        break;
+                    }
+                    Err(Error::EventMaxLengthError(s)) => {
+                        info!("client sent event larger ({} bytes) than max size (cid: {})", s, cid);
+                        ws_stream.send(make_notice_message(Notice::message("event exceeded max size".into()))).await.ok();
+                    },
+                    Err(Error::ProtoParseError) => {
+                        info!("client sent event that could not be parsed (cid: {})", cid);
+                        ws_stream.send(make_notice_message(Notice::message("could not parse command".into()))).await.ok();
+                    },
+                    Err(e) => {
+                        info!("got non-fatal error from client (cid: {}, error: {:?})", cid, e);
+                    },
+                }
+            },
+        }
+    }
+    // connection cleanup - ensure any still running queries are terminated.
+    for (_, stop_tx) in running_queries {
+        stop_tx.send(()).ok();
+    }
+    info!(
+        "stopping client connection (cid: {}, ip: {:?}, sent: {} events, recv: {} events, connected: {:?})",
+        cid,
+        conn.ip(),
+        client_published_event_count,
+        client_received_event_count,
+        orig_start.elapsed()
+    );
+}
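
A note on the cancellation pattern above: each active subscription owns a tokio oneshot sender stored in `running_queries`; re-registering the same subscription id, or a client CLOSE, fires the sender, and the running query observes that and stops. A minimal, self-contained sketch of the same idea, independent of the relay's types (the map name `running` and the id `sub-1` are illustrative, not from the relay):

```rust
use std::collections::HashMap;
use tokio::sync::oneshot;
use tokio::sync::oneshot::error::TryRecvError;

#[tokio::main]
async fn main() {
    // subscription id -> cancellation handle, like `running_queries` above
    let mut running: HashMap<String, oneshot::Sender<()>> = HashMap::new();

    let (cancel_tx, mut cancel_rx) = oneshot::channel::<()>();
    let worker = tokio::spawn(async move {
        // stand-in for a long-running query: check the cancel channel
        // between units of work, stopping once it fires or is dropped
        loop {
            match cancel_rx.try_recv() {
                Err(TryRecvError::Empty) => tokio::task::yield_now().await,
                _ => break, // cancelled, or the sender went away
            }
        }
        println!("query abandoned");
    });

    // inserting under an existing id cancels the previous query;
    // here the map is empty, so there is nothing to cancel yet
    if let Some(prev) = running.insert("sub-1".to_owned(), cancel_tx) {
        prev.send(()).ok();
    }
    // a client CLOSE removes the entry and fires the handle
    if let Some(tx) = running.remove("sub-1") {
        tx.send(()).ok();
    }
    worker.await.unwrap();
}
```
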
@@ -8,7 +8,7 @@ use std::collections::HashMap;
 use std::collections::HashSet;
 
 /// Subscription identifier and set of request filters
-#[derive(Serialize, PartialEq, Debug, Clone)]
+#[derive(Serialize, PartialEq, Eq, Debug, Clone)]
 pub struct Subscription {
     pub id: String,
     pub filters: Vec<ReqFilter>,
@@ -19,7 +19,7 @@ pub struct Subscription {
 /// Corresponds to client-provided subscription request elements. Any
 /// element can be present if it should be used in filtering, or
 /// absent ([`None`]) if it should be ignored.
-#[derive(Serialize, PartialEq, Debug, Clone)]
+#[derive(Serialize, PartialEq, Eq, Debug, Clone)]
 pub struct ReqFilter {
     /// Event hashes
     pub ids: Option<Vec<String>>,
@@ -31,9 +31,16 @@ pub struct ReqFilter {
     pub until: Option<u64>,
     /// List of author public keys
     pub authors: Option<Vec<String>>,
+    /// Limit number of results
+    pub limit: Option<u64>,
     /// Set of tags
     #[serde(skip)]
-    pub tags: Option<HashMap<String, HashSet<String>>>,
+    pub tags: Option<HashMap<char, HashSet<String>>>,
+    /// Force no matches due to malformed data
+    // we can't represent it in the req filter, so we don't want to
+    // erroneously match. This basically indicates the req tried to
+    // do something invalid.
+    pub force_no_match: bool,
 }
 
 impl<'de> Deserialize<'de> for ReqFilter {
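
The two new fields change what a parsed filter can express: `limit` caps the number of query results, and `force_no_match` is an internal flag that well-formed input never sets. A hypothetical round-trip in the style of this crate's own tests (assumes `Subscription`, `serde_json`, and a `Result`-returning test fn are in scope, as in the test module further below):

```rust
let s: Subscription =
    serde_json::from_str(r#"["REQ","xyz",{"authors": ["abc"], "limit": 10}]"#)?;
let f = &s.filters[0];
assert_eq!(f.limit, Some(10));
// only a malformed filter (e.g. a multi-char tag search) sets this flag
assert!(!f.force_no_match);
```
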
@@ -54,7 +61,9 @@ impl<'de> Deserialize<'de> for ReqFilter {
             since: None,
             until: None,
             authors: None,
+            limit: None,
             tags: None,
+            force_no_match: false,
         };
         let mut ts = None;
         // iterate through each key, and assign values that exist
@@ -68,22 +77,28 @@ impl<'de> Deserialize<'de> for ReqFilter {
                 rf.since = Deserialize::deserialize(val).ok();
             } else if key == "until" {
                 rf.until = Deserialize::deserialize(val).ok();
+            } else if key == "limit" {
+                rf.limit = Deserialize::deserialize(val).ok();
             } else if key == "authors" {
                 rf.authors = Deserialize::deserialize(val).ok();
             } else if key.starts_with('#') && key.len() > 1 && val.is_array() {
-                // remove the prefix
-                let tagname = &key[1..];
-                if ts.is_none() {
-                    // Initialize the tag if necessary
-                    ts = Some(HashMap::new());
-                }
-                if let Some(m) = ts.as_mut() {
-                    let tag_vals: Option<Vec<String>> = Deserialize::deserialize(val).ok();
-                    if let Some(v) = tag_vals {
-                        let hs = HashSet::from_iter(v.into_iter());
-                        m.insert(tagname.to_owned(), hs);
-                    }
-                };
+                if let Some(tag_search) = tag_search_char_from_filter(key) {
+                    if ts.is_none() {
+                        // Initialize the tag if necessary
+                        ts = Some(HashMap::new());
+                    }
+                    if let Some(m) = ts.as_mut() {
+                        let tag_vals: Option<Vec<String>> = Deserialize::deserialize(val).ok();
+                        if let Some(v) = tag_vals {
+                            let hs = HashSet::from_iter(v.into_iter());
+                            m.insert(tag_search.to_owned(), hs);
+                        }
+                    };
+                } else {
+                    // tag search that is multi-character, don't add to subscription
+                    rf.force_no_match = true;
+                    continue;
+                }
             }
         }
         rf.tags = ts;
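
The effect of the new guard: a REQ that filters on a tag name which cannot be reduced to a single character no longer silently matches everything; the whole filter is flagged to match nothing instead. A hedged sketch, again in the style of the crate's tests (same assumptions as above):

```rust
// single-character tag searches are indexed as before
let ok: Subscription = serde_json::from_str(r#"["REQ","x",{"#e": ["abc"]}]"#)?;
assert!(!ok.filters[0].force_no_match);

// a multi-character tag name can't be represented in the filter,
// so it is forced to match nothing rather than everything
let bad: Subscription = serde_json::from_str(r#"["REQ","x",{"#meta": ["abc"]}]"#)?;
assert!(bad.filters[0].force_no_match);
```
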
@@ -91,6 +106,26 @@ impl<'de> Deserialize<'de> for ReqFilter {
     }
 }
 
+/// Attempt to form a single-char identifier from a tag search filter
+fn tag_search_char_from_filter(tagname: &str) -> Option<char> {
+    let tagname_nohash = &tagname[1..];
+    // We return the tag character if and only if the tagname consists
+    // of a single char.
+    let mut tagnamechars = tagname_nohash.chars();
+    let firstchar = tagnamechars.next();
+    match firstchar {
+        Some(_) => {
+            // check second char
+            if tagnamechars.next().is_none() {
+                firstchar
+            } else {
+                None
+            }
+        }
+        None => None,
+    }
+}
+
 impl<'de> Deserialize<'de> for Subscription {
     /// Custom deserializer for subscriptions, which have a more
     /// complex structure than the other message types.
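
For clarity, the helper's contract on a few inputs (hypothetical asserts that follow directly from the definition above; the caller has already checked `key.starts_with('#')` and `key.len() > 1`):

```rust
assert_eq!(tag_search_char_from_filter("#e"), Some('e'));
assert_eq!(tag_search_char_from_filter("#p"), Some('p'));
// multi-character tag names are rejected...
assert_eq!(tag_search_char_from_filter("#ep"), None);
// ...as is a bare "#" with nothing after it
assert_eq!(tag_search_char_from_filter("#"), None);
```
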
@@ -185,11 +220,22 @@ impl ReqFilter {
             .unwrap_or(true)
     }
 
+    fn delegated_authors_match(&self, event: &Event) -> bool {
+        if let Some(delegated_pubkey) = &event.delegated_by {
+            self.authors
+                .as_ref()
+                .map(|vs| prefix_match(vs, delegated_pubkey))
+                .unwrap_or(true)
+        } else {
+            false
+        }
+    }
+
     fn tag_match(&self, event: &Event) -> bool {
         // get the hashset from the filter.
         if let Some(map) = &self.tags {
             for (key, val) in map.iter() {
-                let tag_match = event.generic_tag_val_intersect(key, val);
+                let tag_match = event.generic_tag_val_intersect(*key, val);
                 // if there is no match for this tag, the match fails.
                 if !tag_match {
                     return false;
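
`delegated_by` is populated elsewhere when an event carries a valid delegation tag (NIP-26), so the new method lets a filter on the delegator's pubkey also match events actually signed by the delegatee. A hypothetical illustration (`event_stub` is an assumed helper that builds an otherwise-empty `Event`; it is not part of the diff):

```rust
let s: Subscription = serde_json::from_str(r#"["REQ","x",{"authors": ["abc"]}]"#)?;
let mut e = event_stub();                 // assumed helper, not in the diff
e.pubkey = "bcd".to_owned();              // signed by the delegatee...
e.delegated_by = Some("abc".to_owned());  // ...on behalf of "abc"
// authors_match fails on "bcd", but delegated_authors_match succeeds
assert!(s.interested_in_event(&e));
```
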
@@ -214,9 +260,11 @@ impl ReqFilter {
         // self.id.as_ref().map(|v| v == &event.id).unwrap_or(true)
         self.ids_match(event)
            && self.since.map(|t| event.created_at > t).unwrap_or(true)
+            && self.until.map(|t| event.created_at < t).unwrap_or(true)
            && self.kind_match(event.kind)
-            && self.authors_match(event)
+            && (self.authors_match(event) || self.delegated_authors_match(event))
            && self.tag_match(event)
+            && !self.force_no_match
     }
 }
 
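
Note that both time bounds are strict: an event whose `created_at` equals `since` or `until` matches neither side. A sketch of the boundary behavior (`event_at` is an assumed helper returning an `Event` with the given `created_at` and no other constraints):

```rust
let s: Subscription =
    serde_json::from_str(r#"["REQ","x",{"since": 100, "until": 200}]"#)?;
assert!(!s.interested_in_event(&event_at(100))); // fails created_at > since
assert!(!s.interested_in_event(&event_at(200))); // fails created_at < until
assert!(s.interested_in_event(&event_at(150)));
```
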
@@ -274,6 +322,7 @@ mod tests {
         let e = Event {
             id: "foo".to_owned(),
             pubkey: "abcd".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),
@@ -292,6 +341,7 @@ mod tests {
         let e = Event {
             id: "abcd".to_owned(),
             pubkey: "".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),
@@ -310,6 +360,7 @@ mod tests {
         let e = Event {
             id: "abcde".to_owned(),
             pubkey: "".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),
@@ -321,6 +372,52 @@ mod tests {
         Ok(())
     }
 
+    #[test]
+    fn interest_until() -> Result<()> {
+        // subscription with a filter for ID and time
+        let s: Subscription =
+            serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "until": 1000}]"#)?;
+        let e = Event {
+            id: "abc".to_owned(),
+            pubkey: "".to_owned(),
+            delegated_by: None,
+            created_at: 50,
+            kind: 0,
+            tags: Vec::new(),
+            content: "".to_owned(),
+            sig: "".to_owned(),
+            tagidx: None,
+        };
+        assert!(s.interested_in_event(&e));
+        Ok(())
+    }
+
+    #[test]
+    fn interest_range() -> Result<()> {
+        // subscription with a filter for ID and time
+        let s_in: Subscription =
+            serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 100, "until": 200}]"#)?;
+        let s_before: Subscription =
+            serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 100, "until": 140}]"#)?;
+        let s_after: Subscription =
+            serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 160, "until": 200}]"#)?;
+        let e = Event {
+            id: "abc".to_owned(),
+            pubkey: "".to_owned(),
+            delegated_by: None,
+            created_at: 150,
+            kind: 0,
+            tags: Vec::new(),
+            content: "".to_owned(),
+            sig: "".to_owned(),
+            tagidx: None,
+        };
+        assert!(s_in.interested_in_event(&e));
+        assert!(!s_before.interested_in_event(&e));
+        assert!(!s_after.interested_in_event(&e));
+        Ok(())
+    }
+
     #[test]
     fn interest_time_and_id() -> Result<()> {
         // subscription with a filter for ID and time
@@ -329,6 +426,7 @@ mod tests {
         let e = Event {
             id: "abc".to_owned(),
             pubkey: "".to_owned(),
+            delegated_by: None,
             created_at: 50,
             kind: 0,
             tags: Vec::new(),
@@ -347,6 +445,7 @@ mod tests {
         let e = Event {
             id: "abc".to_owned(),
             pubkey: "".to_owned(),
+            delegated_by: None,
             created_at: 1001,
             kind: 0,
             tags: Vec::new(),
@@ -365,6 +464,7 @@ mod tests {
         let e = Event {
             id: "abc".to_owned(),
             pubkey: "".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),
@@ -383,6 +483,7 @@ mod tests {
         let e = Event {
             id: "123".to_owned(),
             pubkey: "abc".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),
@@ -393,14 +494,15 @@ mod tests {
         assert!(s.interested_in_event(&e));
         Ok(())
     }
-    #[test]
+
+    #[test]
     fn authors_multi_pubkey() -> Result<()> {
         // check for any of a set of authors, against the pubkey
         let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"authors":["abc", "bcd"]}]"#)?;
         let e = Event {
             id: "123".to_owned(),
             pubkey: "bcd".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),
@@ -419,6 +521,7 @@ mod tests {
         let e = Event {
             id: "123".to_owned(),
             pubkey: "xyz".to_owned(),
+            delegated_by: None,
             created_at: 0,
             kind: 0,
             tags: Vec::new(),
src/utils.rs (18 additions)

@@ -13,3 +13,21 @@ pub fn unix_time() -> u64 {
 pub fn is_hex(s: &str) -> bool {
     s.chars().all(|x| char::is_ascii_hexdigit(&x))
 }
+
+/// Check if a string contains only lower-case hex chars.
+pub fn is_lower_hex(s: &str) -> bool {
+    s.chars().all(|x| {
+        (char::is_ascii_lowercase(&x) || char::is_ascii_digit(&x)) && char::is_ascii_hexdigit(&x)
+    })
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn lower_hex() {
+        let hexstr = "abcd0123";
+        assert_eq!(is_lower_hex(hexstr), true);
+    }
+}
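
The difference from `is_hex` is only case-sensitivity; both accept the empty string. Hedged examples following directly from the two definitions:

```rust
assert!(is_hex("ABCD0123"));        // upper-case hex is still hex...
assert!(!is_lower_hex("ABCD0123")); // ...but not lower-case hex
assert!(is_lower_hex("abcd0123"));
assert!(is_lower_hex(""));          // vacuously true: `all` on an empty iterator
```
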
tests/common/mod.rs (new file, 110 additions)

@@ -0,0 +1,110 @@
+use anyhow::{anyhow, Result};
+use nostr_rs_relay::config;
+use nostr_rs_relay::server::start_server;
+//use http::{Request, Response};
+use hyper::{Client, StatusCode, Uri};
+use std::net::TcpListener;
+use std::sync::atomic::{AtomicU16, Ordering};
+use std::sync::mpsc as syncmpsc;
+use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
+use std::thread;
+use std::thread::JoinHandle;
+use std::time::Duration;
+use tracing::{debug, info};
+
+pub struct Relay {
+    pub port: u16,
+    pub handle: JoinHandle<()>,
+    pub shutdown_tx: MpscSender<()>,
+}
+
+pub fn start_relay() -> Result<Relay> {
+    // setup tracing
+    let _trace_sub = tracing_subscriber::fmt::try_init();
+    info!("Starting a new relay");
+    // replace default settings
+    let mut settings = config::Settings::default();
+    // identify open port
+    info!("Checking for address...");
+    let port = get_available_port().unwrap();
+    info!("Found open port: {}", port);
+    // bind to local interface only
+    settings.network.address = "127.0.0.1".to_owned();
+    settings.network.port = port;
+    // create an in-memory DB with multiple readers
+    settings.database.in_memory = true;
+    settings.database.min_conn = 4;
+    settings.database.max_conn = 8;
+    let (shutdown_tx, shutdown_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
+    let handle = thread::spawn(|| {
+        // server will block the thread it is run on.
+        let _ = start_server(settings, shutdown_rx);
+    });
+    // how do we know the relay has finished starting up?
+    Ok(Relay {
+        port,
+        handle,
+        shutdown_tx,
+    })
+}
+
+// check if the server is healthy via HTTP request
+async fn server_ready(relay: &Relay) -> Result<bool> {
+    let uri: String = format!("http://127.0.0.1:{}/", relay.port);
+    let client = Client::new();
+    let uri: Uri = uri.parse().unwrap();
+    let res = client.get(uri).await?;
+    Ok(res.status() == StatusCode::OK)
+}
+
+pub async fn wait_for_healthy_relay(relay: &Relay) -> Result<()> {
+    // TODO: maximum time to wait for server to become healthy.
+    // give it a little time to start up before we start polling
+    tokio::time::sleep(Duration::from_millis(10)).await;
+    loop {
+        let server_check = server_ready(relay).await;
+        match server_check {
+            Ok(true) => {
+                // server responded with 200-OK.
+                break;
+            }
+            Ok(false) => {
+                // server responded with an error, we're done.
+                return Err(anyhow!("Got non-200-OK from relay"));
+            }
+            Err(_) => {
+                // server is not yet ready, probably connection refused...
+                debug!("Relay not ready, will try again...");
+                tokio::time::sleep(Duration::from_millis(10)).await;
+            }
+        }
+    }
+    info!("relay is ready");
+    Ok(())
+    // simple message sent to web browsers
+    //let mut request = Request::builder()
+    //    .uri("https://www.rust-lang.org/")
+    //    .header("User-Agent", "my-awesome-agent/1.0");
+}
+
+// from https://elliotekj.com/posts/2017/07/25/find-available-tcp-port-rust/
+// This needed some modification; if multiple tasks all ask for open ports,
+// they will tend to get the same one. Instead, we probe candidate ports
+// incrementally from a global counter.
+
+static PORT_COUNTER: AtomicU16 = AtomicU16::new(4030);
+
+fn get_available_port() -> Option<u16> {
+    let startsearch = PORT_COUNTER.fetch_add(10, Ordering::SeqCst);
+    if startsearch >= 20000 {
+        // wrap around
+        PORT_COUNTER.store(4030, Ordering::Relaxed);
+    }
+    (startsearch..20000).find(|port| port_is_available(*port))
+}
+
+pub fn port_is_available(port: u16) -> bool {
+    info!("checking on port {}", port);
+    match TcpListener::bind(("127.0.0.1", port)) {
+        Ok(_) => true,
+        Err(_) => false,
+    }
+}
tests/integration_test.rs (new file, 47 additions)

@@ -0,0 +1,47 @@
+use anyhow::Result;
+
+use std::thread;
+use std::time::Duration;
+
+mod common;
+
+#[tokio::test]
+async fn start_and_stop() -> Result<()> {
+    // this will be the common pattern for acquiring a new relay:
+    // start a fresh relay, on a port to-be-provided back to us:
+    let relay = common::start_relay()?;
+    // wait for the relay's webserver to start up and deliver a page:
+    common::wait_for_healthy_relay(&relay).await?;
+    let port = relay.port;
+    // just make sure we can startup and shut down.
+    // if we send a shutdown message before the server is listening,
+    // we will get a SendError. Keep sending until someone is
+    // listening.
+    loop {
+        let shutdown_res = relay.shutdown_tx.send(());
+        match shutdown_res {
+            Ok(()) => {
+                break;
+            }
+            Err(_) => {
+                thread::sleep(Duration::from_millis(100));
+            }
+        }
+    }
+    // wait for relay to shutdown
+    let thread_join = relay.handle.join();
+    assert!(thread_join.is_ok());
+    // assert that port is now available.
+    assert!(common::port_is_available(port));
+    Ok(())
+}
+
+#[tokio::test]
+async fn relay_home_page() -> Result<()> {
+    // get a relay and wait for startup...
+    let relay = common::start_relay()?;
+    common::wait_for_healthy_relay(&relay).await?;
+    // tell relay to shutdown
+    let _res = relay.shutdown_tx.send(());
+    Ok(())
+}