Mirror of https://github.com/scsibug/nostr-rs-relay.git, synced 2025-09-01 03:40:46 -04:00
Compare commits
220 Commits (SHA1):

2be75e18fb 5f6ff4c2b7 df411c24fb 39f9984c4f 9d55731073 5638f70d66 98a08d054a 0ef7d618a8 bf06bea808 e5ca8c2a86
8ea63f0b27 3229e4192f 7fd9b55e70 5cecfba319 d0f57aea21 40abd6858e 136e41d234 35a1973a46 1daa25600d 692925942a
84afd4b64e 46160bb1f9 2fc9168a38 01d0d44868 93f6337fda f3a42712a6 27361d064a 3bafb611e5 b960ab70de 15e2f097aa
185f9e7abb f44dae6ac9 abc356c17d 81f8256c37 b3db2bd081 d31e974d56 36eaf9fea5 a16c4e698a e63d179424 28b7b83a6e
2e42b1b86e bd07a11f50 bc4b45d4b8 1ca5d652de d7cceab8fc 2805a96e5b ac14a0759f cdd4e5949f 5999009779 e36c791c53
d95adbcb3d 509736c56d 8004ea9b44 866c239cc9 6012b57e95 559541b160 facaed7805 ba4fcd072a 2b79099cfe eb1d2d717d
e5e03d4378 c377b136aa bca5614a82 f7550b4c61 1623bacd0d 2bbde8ad09 a42004c30c 9dd97908cf ab749e9cf0 1820e9c689
2d3a35fe30 9c77b06f79 c8e8b71b91 6d57adef73 111eb4a10c 214f152c5d 3fcaf97a15 cec501b37f 2557c7f69c 3979a94726
71bdbfb425 b6798f96b6 c1152ce430 6f1a4e7d76 1804bee912 34db91940c 0859e535ed bdd4e43df4 dfa6985f44 57e1b53c13
53f83aa923 34a8f99d61 c8f7420334 e2869e8fad 5c07b2eca5 25752abe6b 16f6e974c8 744d467a28 b094fbcabd 4121c872bc
6489e685ab 6800c2e39d e996d4c009 2331c881d7 585fdd3884 cf3e67500f 1d19442cfd 13cc24b5cd f543957b34 7021f102e8
fddbf321bc 3e7f2e21df 9d9c6c78d1 703b2efe6e 0db6487ce3 ba987d3212 73f4f60cc7 d06d227ebe 3519488c4e fbd3315110
3d3d1bde53 ed336111bb 8aed572989 62e8da689d 807d1aa384 66a55b55b9 76c77c3e56 50daab8a6f ffd4e6f997 bbd716963e
ca95e8cf22 e9d2a2cbd0 39a945b493 9a84dc19e9 20c4bb42eb 0e519f6b77 3dd0f2c9c6 b7c8737166 c0b112c094 cb283ac316
2c6ac69bfd d929ae2752 14fe9f9ee1 7774db8c47 104ef2b9e1 c06139ec99 19ec89593d 27902bc5f4 d2adddaee4 b23b3ce8ec
5f9fe1ce59 6a8c4ed1b5 966c853700 65fd0ed08b 0b51675b38 2e22334631 cb2ac4bf0f 38dc7789dc ce0e00ffb3 3e4ae4aeec
c6a8807485 8137b6211c 29effaae23 e5074f2e46 4fd7643907 1e1ec69175 e08647867c ae0f7171ed 4f1a912f36 95748647f0
25480e837f b80b54cd9d 8ea732cbe5 0f68c4e5c2 dab2cd5792 f411aa6fc2 d31bbda087 5917bc53b2 91177c61a1 53c2a8051c
168cf513ac ea204761c9 c270ae1434 64bd983cb6 1c153bc784 dc11d9a619 cd1557787b 86bb7aeb9a ce37fc1a2d 2cfd384339
8c013107f9 64a4466d30 1596c23eb4 129badd4e1 6f7c080180 af92561ef6 d833a3e40d 462eb46642 cf144d503d fb8375aef2
88ac31b549 677b7d39e9 b24d2f9aaa 7a3899d852 818108b793 d10348f7e1 8598e443d8 43222d44e5 7c1516c4fb 0c72053a49
3f32ff67ab 0b9778d6ca 9be04120c7 cc06167e06 b6e33f044f 1b2c6f9fca 0d8d39ad22 0e851d4f71 3c880b2f49 7a4c9266ec
@@ -7,6 +7,7 @@ environment:
packages:
- cargo
- sqlite-devel
- protobuf-compiler
sources:
- https://git.sr.ht/~gheartsfield/nostr-rs-relay/
shell: false
39 .github/workflows/ci.yml (vendored, new file)

@@ -0,0 +1,39 @@
name: Test and build

on:
  push:
    branches:
      - master

jobs:
  test_nostr-rs-relay:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Update local toolchain
        run: |
          sudo apt-get install -y protobuf-compiler
          rustup update
          rustup component add clippy
          rustup install nightly

      - name: Toolchain info
        run: |
          cargo --version --verbose
          rustc --version
          cargo clippy --version

      # - name: Lint
      #   run: |
      #     cargo fmt -- --check
      #     cargo clippy -- -D warnings

      - name: Test
        run: |
          cargo check
          cargo test --all

      - name: Build
        run: |
          cargo build --release --locked
2 .gitignore (vendored)

@@ -1,2 +1,2 @@
/target
**/target/
nostr.db
@@ -11,6 +11,6 @@ repos:
- repo: https://github.com/doublify/pre-commit-rust
  rev: v1.0
  hooks:
    - id: fmt
    # - id: fmt
    - id: cargo-check
    - id: clippy
1440 Cargo.lock (generated)
File diff suppressed because it is too large
18 Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "nostr-rs-relay"
version = "0.7.5"
version = "0.8.8"
edition = "2021"
authors = ["Greg Heartsfield <scsibug@imap.cc>"]
description = "A relay implementation for the Nostr protocol"

@@ -12,9 +12,12 @@ keywords = ["nostr", "server"]
categories = ["network-programming", "web-programming"]

[dependencies]
clap = { version = "4.0.32", features = ["env", "default", "derive"]}
tracing = "0.1.36"
tracing-subscriber = "0.2.0"
tokio = { version = "1", features = ["full", "tracing", "signal"] }
prost = "0.11"
tonic = "0.8.3"
console-subscriber = "0.1.8"
futures = "0.3"
futures-util = "0.3"

@@ -28,7 +31,7 @@ secp256k1 = {version = "0.21", features = ["rand", "rand-std", "serde", "bitcoin
serde = { version = "1.0", features = ["derive"] }
serde_json = {version = "1.0", features = ["preserve_order"]}
hex = "0.4"
rusqlite = { version = "0.26", features = ["limits","bundled"]}
rusqlite = { version = "0.26", features = ["limits","bundled","modern_sqlite", "trace"]}
r2d2 = "0.8"
r2d2_sqlite = "0.19"
lazy_static = "1.4"

@@ -41,6 +44,17 @@ parse_duration = "2"
rand = "0.8"
const_format = "0.2.28"
regex = "1"
async-trait = "0.1.60"
async-std = "1.12.0"
sqlx = { version ="0.6.2", features=["runtime-tokio-rustls", "postgres", "chrono"]}
chrono = "0.4.23"
prometheus = "0.13.3"
indicatif = "0.17.3"
bech32 = "0.9.1"
url = "2.3.1"

[dev-dependencies]
anyhow = "1"

[build-dependencies]
tonic-build = { version="0.8.3", features = ["prost"] }
10 Dockerfile

@@ -1,5 +1,7 @@
FROM docker.io/library/rust:1.66.0@sha256:359949280cebefe93ccb33089fe25111a3aadfe99eac4b6cbe8ec3e1b571dacb as builder
FROM docker.io/library/rust:1-bookworm as builder
RUN apt-get update \
    && apt-get install -y cmake protobuf-compiler \
    && rm -rf /var/lib/apt/lists/*
RUN USER=root cargo install cargo-auditable
RUN USER=root cargo new --bin nostr-rs-relay
WORKDIR ./nostr-rs-relay

@@ -12,12 +14,14 @@ RUN rm src/*.rs

# copy project source code
COPY ./src ./src
COPY ./proto ./proto
COPY ./build.rs ./build.rs

# build auditable release using locked deps
RUN rm ./target/release/deps/nostr*relay*
RUN cargo auditable build --release --locked

FROM docker.io/library/debian:bullseye-20221205-slim@sha256:25f10b4f1ded5341a3ca0a30290ff3cd5639415f0c5a2222d5e7d5dd72952aa1
FROM docker.io/library/debian:bookworm-slim

ARG APP=/usr/src/app
ARG APP_DATA=/usr/src/app/db
50 README.md

@@ -2,7 +2,8 @@

This is a [nostr](https://github.com/nostr-protocol/nostr) relay,
written in Rust. It currently supports the entire relay protocol, and
persists data with SQLite.
persists data with SQLite. There is experimental support for
Postgresql.

The project master repository is available on
[sourcehut](https://sr.ht/~gheartsfield/nostr-rs-relay/), and is

@@ -10,6 +11,9 @@ mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay).

[](https://builds.sr.ht/~gheartsfield/nostr-rs-relay/commits/master?)

## Features

[NIPs](https://github.com/nostr-protocol/nips) with a relay-specific implementation are listed here.

@@ -28,7 +32,10 @@ mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay).
- [x] NIP-16: [Event Treatment](https://github.com/nostr-protocol/nips/blob/master/16.md)
- [x] NIP-20: [Command Results](https://github.com/nostr-protocol/nips/blob/master/20.md)
- [x] NIP-22: [Event `created_at` limits](https://github.com/nostr-protocol/nips/blob/master/22.md) (_future-dated events only_)
- [x] NIP-26: [Event Delegation](https://github.com/nostr-protocol/nips/blob/master/26.md)
- [ ] NIP-26: [Event Delegation](https://github.com/nostr-protocol/nips/blob/master/26.md) (_implemented, but currently disabled_)
- [x] NIP-28: [Public Chat](https://github.com/nostr-protocol/nips/blob/master/28.md)
- [x] NIP-33: [Parameterized Replaceable Events](https://github.com/nostr-protocol/nips/blob/master/33.md)
- [x] NIP-42: [Authentication of clients to relays](https://github.com/nostr-protocol/nips/blob/master/42.md)

## Quick Start
@@ -81,6 +88,38 @@ Text Note [81cf...2652] from 296a...9b92 5 seconds ago
A pre-built container is also available on DockerHub:
https://hub.docker.com/r/scsibug/nostr-rs-relay

## Build and Run (without Docker)

Building `nostr-rs-relay` requires an installation of Cargo & Rust: https://www.rust-lang.org/tools/install

Clone this repository, and then build a release version of the relay:

```console
$ git clone -q https://git.sr.ht/\~gheartsfield/nostr-rs-relay
$ cd nostr-rs-relay
$ cargo build -q -r
```

The relay executable is now located in
`target/release/nostr-rs-relay`. In order to run it with logging
enabled, execute it with the `RUST_LOG` variable set:

```console
$ RUST_LOG=warn,nostr_rs_relay=info ./target/release/nostr-rs-relay
Dec 26 10:31:56.455  INFO nostr_rs_relay: Starting up from main
Dec 26 10:31:56.464  INFO nostr_rs_relay::server: listening on: 0.0.0.0:8080
Dec 26 10:31:56.466  INFO nostr_rs_relay::server: db writer created
Dec 26 10:31:56.466  INFO nostr_rs_relay::db: Built a connection pool "event writer" (min=1, max=2)
Dec 26 10:31:56.466  INFO nostr_rs_relay::db: opened database "./nostr.db" for writing
Dec 26 10:31:56.466  INFO nostr_rs_relay::schema: DB version = 11
Dec 26 10:31:56.467  INFO nostr_rs_relay::db: Built a connection pool "maintenance writer" (min=1, max=2)
Dec 26 10:31:56.467  INFO nostr_rs_relay::server: control message listener started
Dec 26 10:31:56.468  INFO nostr_rs_relay::db: Built a connection pool "client query" (min=4, max=8)
```

You now have a running relay, on port `8080`. Use a `nostr` client or
`websocat` to connect and send/query for events.
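For example, a minimal subscription with `websocat` (a sketch; the subscription id `sub1` and the filter are arbitrary, and `websocat` must be installed separately):

```console
$ websocat ws://localhost:8080
["REQ","sub1",{"kinds":[1],"limit":2}]
```

The relay replies with zero or more `EVENT` messages, typically followed by `["EOSE","sub1"]` once stored results are exhausted.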
## Configuration

The sample [`config.toml`](config.toml) file demonstrates the

@@ -101,7 +140,7 @@ settings.

For examples of putting the relay behind a reverse proxy (for TLS
termination, load balancing, and other features), see [Reverse
Proxy](reverse-proxy.md).
Proxy](docs/reverse-proxy.md).

## Dev Channel

@@ -115,3 +154,8 @@ To chat about `nostr-rs-relay` on `nostr` itself; visit our channel on [anigma](...)

License
---
This project is MIT licensed.

External Documentation and Links
---

* [BlockChainCaffe's Nostr Relay Setup Guide](https://github.com/BlockChainCaffe/Nostr-Relay-Setup-Guide)
10 build.rs (new file)

@@ -0,0 +1,10 @@
fn main() -> Result<(), Box<dyn std::error::Error>> {
    tonic_build::configure()
        .build_server(false)
        .protoc_arg("--experimental_allow_proto3_optional")
        .compile(
            &["proto/nauthz.proto"],
            &["proto"],
        )?;
    Ok(())
}
65 config.toml

@@ -16,18 +16,26 @@ description = "A newly created nostr-rs-relay.\n\nCustomize this with your own i
# Administrative contact URI
#contact = "mailto:contact@example.com"

# Favicon location. Relative to the current directory. Assumes an
# ICO format.
#favicon = "favicon.ico"

[diagnostics]
# Enable tokio tracing (for use with tokio-console)
#tracing = true
#tracing = false

[database]
# Database engine (sqlite/postgres). Defaults to sqlite.
# Support for postgres is currently experimental.
#engine = "sqlite"

# Directory for SQLite files. Defaults to the current directory. Can
# also be specified (and overridden) with the "--db dirname" command
# line option.
data_directory = "."
#data_directory = "."

# Use an in-memory database instead of 'nostr.db'.
# Requires sqlite engine.
# Caution; this will not survive a process restart!
#in_memory = false

@@ -36,8 +44,23 @@ data_directory = "."
# Minimum number of SQLite reader connections
#min_conn = 4

# Maximum number of SQLite reader connections
#max_conn = 128
# Maximum number of SQLite reader connections. Recommend setting this
# to approx the number of cores.
#max_conn = 8

# Database connection string. Required for postgres; not used for
# sqlite.
#connection = "postgresql://postgres:nostr@localhost:7500/nostr"

[grpc]
# gRPC interfaces for externalized decisions and other extensions to
# functionality.
#
# Events can be authorized through an external service, by providing
# the URL below. In the event the server is not accessible, events
# will be permitted. The protobuf3 schema used is available in
# `proto/nauthz.proto`.
# event_admission_server = "http://[::1]:50051"

[network]
# Bind to this network address

@@ -62,8 +85,29 @@ reject_future_seconds = 1800

[limits]
# Limit events created per second, averaged over one minute. Must be
# an integer. If not set (or set to 0), defaults to unlimited.
#messages_per_sec = 0
# an integer. If not set (or set to 0), there is no limit. Note:
# this is for the server as a whole, not per-connection.
#
# Limiting event creation is highly recommended if your relay is
# public!
#
#messages_per_sec = 5

# Limit client subscriptions created, averaged over one minute. Must
# be an integer. If not set (or set to 0), defaults to unlimited.
# Strongly recommended to set this to a low value such as 10 to ensure
# fair service.
#subscriptions_per_min = 0

# UNIMPLEMENTED...
# Limit how many concurrent database connections a client can have.
# This prevents a single client from starting too many expensive
# database queries. Must be an integer. If not set (or set to 0),
# defaults to unlimited (subject to subscription limits).
#db_conns_per_client = 0

# Limit blocking threads used for database connections. Defaults to 16.
#max_blocking_threads = 16

# Limit the maximum size of an EVENT message. Defaults to 128 KB.
# Set to 0 for unlimited.

@@ -83,6 +127,11 @@ reject_future_seconds = 1800
# backpressure to senders if writes are slow.
#event_persist_buffer = 4096

# Event kind blacklist. Events with these kinds will be discarded.
#event_kind_blacklist = [
#  70202,
#]

[authorization]
# Pubkey addresses in this array are whitelisted for event publishing.
# Only valid events by these authors will be accepted, if the variable

@@ -91,6 +140,8 @@ reject_future_seconds = 1800
#  "35d26e4690cbe1a898af61cc3515661eb5fa763b57bd0b42e45099c8b32fd50f",
#  "887645fef0ce0c3c1218d2f5d8e6132a19304cdc57cd20281d082f38cfea0072",
#]
# Enable NIP-42 authentication
#nip42_auth = false

[verified_users]
# NIP-05 verification of users. Can be "enabled" to require NIP-05
122 docs/database-maintenance.md (new file)

@@ -0,0 +1,122 @@
# Database Maintenance

`nostr-rs-relay` uses the SQLite embedded database to minimize
dependencies and the overall footprint of running a relay. If traffic
is light, the relay should run with very little need for
intervention. For heavily trafficked relays, there are a number of
steps the operator may need to take to maintain performance and
limit disk usage.

This maintenance guide is current as of version `0.8.2`. Future
versions may incorporate and automate some of these steps.

## Backing Up the Database

To prevent data loss, the database should be backed up regularly. The
recommended method is to use the `sqlite3` command to perform an
"Online Backup". This can be done while the relay is running; queries
can still run and events will be persisted during the backup.

The following commands will back up the database to a
dated file, and then compress it to minimize size:

```console
BACKUP_FILE=/var/backups/nostr/`date +%Y%m%d_%H%M`.db
sqlite3 -readonly /apps/nostr-relay/nostr.db ".backup $BACKUP_FILE"
sqlite3 $BACKUP_FILE "vacuum;"
bzip2 -9 $BACKUP_FILE
```

Nostr events are very compressible. Expect a compression ratio on the
order of 4:1, resulting in a 75% space saving.

## Vacuuming the Database

As the database is updated, it can become fragmented. Performing a
full `vacuum` will rebuild the entire database file and may reduce its
size, especially if a large amount of data was updated or deleted.

```console
vacuum;
```
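The `vacuum` statement above is entered at a `sqlite3` prompt. It can also be issued non-interactively; for example, reusing the database path from the backup example:

```console
sqlite3 /apps/nostr-relay/nostr.db "VACUUM;"
```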
## Clearing Hidden Events

When events are deleted, the event is not actually removed from the
database. Instead, a `HIDDEN` flag is set to true for the event,
which excludes it from search results. High-volume replacements from
profile or other replaceable events are deleted, not hidden, in the
current version of the relay.

In the current version, removing hidden events should not result in
significant space savings, but it can still be used if there is no
desire to hold on to events that can never be re-broadcast.

```console
PRAGMA foreign_keys = ON;
delete from event where HIDDEN=true;
```

## Manually Removing Events

For a variety of reasons, an operator may wish to remove some events
from the database. The only way to achieve this today is with
manually run SQL commands.

It is recommended to have a good backup prior to manually running SQL
commands!

In all cases, it is mandatory to enable foreign keys, and this must be
done for every connection. Otherwise, you will likely orphan rows in
the `tag` table.

### Deleting a Specific Event

```console
PRAGMA foreign_keys = ON;
delete from event where event_hash=x'00000000000c1271675dc86e3e1dd1336827bccabb90dc4c9d3b4465efefe00e';
```

### Deleting All Events for a Pubkey

```console
PRAGMA foreign_keys = ON;
delete from event where author=x'000000000002c7831d9c5a99f183afc2813a6f69a16edda7f6fc0ed8110566e6';
```

### Deleting All Events of a Kind

```console
PRAGMA foreign_keys = ON;
delete from event where kind=70202;
```

### Deleting Old Events

In this scenario, we wish to delete any event that has been stored by
our relay for more than 1 month. Crucially, this is based on when the
event was stored, not when the event says it was created. If an event
has a `created` field of 2 years ago, but was first sent to our relay
yesterday, it would not be deleted in this scenario. Keep in mind, we
do not track anything for re-broadcast events that we already have, so
this is not a very effective way of implementing a "least recently
seen" policy.

```console
PRAGMA foreign_keys = ON;
TODO!
```
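The statement above is still a TODO upstream. As a non-authoritative sketch, assuming the `event` table's `first_seen` column stores Unix-epoch seconds (the convention the bulk loader's INSERT uses later in this changeset), a one-month cutoff could look like:

```console
PRAGMA foreign_keys = ON;
-- hypothetical: 2592000 seconds = 30 days
delete from event where first_seen < strftime('%s','now') - 2592000;
```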
### Delete Profile Events with No Recent Events

Many users create profiles, post a "hello world" event, and then never
appear again (likely using an ephemeral keypair that was lost in the
browser cache). We can find these accounts and remove them after some
time.

```console
PRAGMA foreign_keys = ON;
TODO!
```
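This statement is also a TODO upstream. A hedged sketch under the same schema assumptions, treating kind `0` as the profile-metadata kind and 30 days as "recent":

```console
PRAGMA foreign_keys = ON;
-- hypothetical: remove profiles whose author has no event seen in the last 30 days
delete from event where kind=0 and author not in
  (select distinct author from event where first_seen > strftime('%s','now') - 2592000);
```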
79 docs/grpc-extensions.md (new file)

@@ -0,0 +1,79 @@
# gRPC Extensions Design Document

The relay will be extensible through gRPC endpoints, definable in the
main configuration file. These will allow external programs to host
logic for deciding things such as: should this event be persisted,
should this connection be allowed, and should this subscription
request be registered. The primary goal is to allow for
relay-operator-specific functionality, enabling operators to serve
smaller communities and reduce spam and abuse.

This will likely evolve substantially; the first goal is to get a
basic one-way service that lets an external program decide on
event persistence. This does not represent the final state of gRPC
extensibility in `nostr-rs-relay`.

## Considerations

Write event latency must not be significantly affected. However, the
primary reason we are implementing this is spam/abuse protection, so
we are willing to tolerate some increase in latency if that protects
us against outages!

The interface should provide enough information to make simple
decisions, without burdening the relay with extra queries. The
decision endpoint will be mostly responsible for maintaining state and
gathering additional details.

## Design Overview

A gRPC server may be defined in the `config.toml` file. If it exists,
the relay will attempt to connect to it and send a message for each
`EVENT` command submitted by clients. If a successful response is
returned indicating the event is permitted, the relay continues
processing the event as normal. All existing whitelist, blacklist,
and `NIP-05` validation checks are still performed and MAY still
result in the event being rejected. If a successful response is
returned indicating the decision is anything other than permit, then
the relay MUST reject the event, and return a command result to the
user (using `NIP-20`) indicating the event was blocked (optionally
providing a message).

In the event there is an error in the gRPC interface, event processing
proceeds as if gRPC was disabled (fail open). This allows gRPC
servers to be deployed with minimal chance of causing a full relay
outage.
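Wiring this up is a single entry in the relay's configuration, matching the commented-out sample in `config.toml` earlier in this changeset:

```toml
[grpc]
# URL of the event admission service; if it is unreachable, events are permitted (fail open)
event_admission_server = "http://[::1]:50051"
```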
## Design Details

Currently one procedure call is supported, `EventAdmit`, in the
`Authorization` service. It accepts the following data in order to
support authorization decisions:

- The event itself
- The client IP that submitted the event
- The client's HTTP origin header, if one exists
- The client's HTTP user agent header, if one exists
- The public key of the client, if `NIP-42` authentication was
  performed (not supported in the relay yet!)
- The `NIP-05` associated with the event's public key, if it is known
  to the relay

A server providing authorization decisions will return the following:

- A decision to permit or deny the event
- An optional message that explains why the event was denied, to be
  transmitted to the client

## Security Issues

There is little attempt to secure this interface, since it is intended
for use by processes running on the same host. It is recommended to
ensure that the gRPC server providing the API is not exposed to the
public Internet. Authorization server implementations should have
their own security reviews performed.

A slow gRPC server could cause availability issues for event
processing, since this is performed on a single thread. Avoid any
expensive or long-running processes that could result from submitted
events, since any client can initiate a gRPC call to the service.
@@ -68,13 +68,32 @@ http {
    server_name relay.example.com;
    ssl_certificate /etc/letsencrypt/live/relay.example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/relay.example.com/privkey.pem;
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_protocols TLSv1.3 TLSv1.2;
    ssl_prefer_server_ciphers on;
    ssl_ecdh_curve secp521r1:secp384r1;
    ssl_ciphers EECDH+AESGCM:EECDH+AES256;

    # Optional Diffie-Hellman parameters
    # Generate with openssl dhparam -out /etc/ssl/certs/dhparam.pem 4096
    #ssl_dhparam /etc/ssl/certs/dhparam.pem;

    ssl_session_cache shared:TLS:2m;
    ssl_buffer_size 4k;

    # OCSP stapling
    ssl_stapling on;
    ssl_stapling_verify on;
    resolver 1.1.1.1 1.0.0.1 [2606:4700:4700::1111] [2606:4700:4700::1001]; # Cloudflare

    # Set HSTS to 365 days
    add_header Strict-Transport-Security 'max-age=31536000; includeSubDomains; preload' always;
    keepalive_timeout 70;

    location / {
        proxy_pass http://localhost:8080;
        proxy_http_version 1.1;
        proxy_read_timeout 1d;
        proxy_send_timeout 1d;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_set_header Host $host;

@@ -85,7 +104,7 @@ http {

### Nginx Notes

The above configuration was tested on `nginx` `1.18.0` was tested on `Ubuntu 20.04`.
The above configuration was tested on `nginx` `1.18.0` on `Ubuntu` `20.04` and `22.04`.

For help installing `nginx` on `Ubuntu`, see [this guide](https://www.digitalocean.com/community/tutorials/how-to-install-nginx-on-ubuntu-20-04).
1010 examples/nauthz/Cargo.lock (generated, new file)
File diff suppressed because it is too large
13 examples/nauthz/Cargo.toml (new file)

@@ -0,0 +1,13 @@
[package]
name = "nauthz-server"
version = "0.1.0"
edition = "2021"

[dependencies]
# Common dependencies
tokio = { version = "1.0", features = ["rt-multi-thread", "macros"] }
prost = "0.11"
tonic = "0.8.3"

[build-dependencies]
tonic-build = { version="0.8.3", features = ["prost"] }
7 examples/nauthz/build.rs (new file)

@@ -0,0 +1,7 @@
fn main() -> Result<(), Box<dyn std::error::Error>> {
    tonic_build::configure()
        // server stubs are required here: the example implements the Authorization service
        .build_server(true)
        .protoc_arg("--experimental_allow_proto3_optional")
        .compile(&["../../proto/nauthz.proto"], &["../../proto"])?;
    Ok(())
}
60 examples/nauthz/src/main.rs (new file)

@@ -0,0 +1,60 @@
use tonic::{transport::Server, Request, Response, Status};

use nauthz_grpc::authorization_server::{Authorization, AuthorizationServer};
use nauthz_grpc::{Decision, EventReply, EventRequest};

pub mod nauthz_grpc {
    tonic::include_proto!("nauthz");
}

#[derive(Default)]
pub struct EventAuthz {
    allowed_kinds: Vec<u64>,
}

#[tonic::async_trait]
impl Authorization for EventAuthz {
    async fn event_admit(
        &self,
        request: Request<EventRequest>,
    ) -> Result<Response<EventReply>, Status> {
        let reply;
        let req = request.into_inner();
        let event = req.event.unwrap();
        let content_prefix: String = event.content.chars().take(40).collect();
        println!("recvd event, [kind={}, origin={:?}, nip05_domain={:?}, tag_count={}, content_sample={:?}]",
                 event.kind, req.origin, req.nip05.map(|x| x.domain), event.tags.len(), content_prefix);
        // Permit any event with a whitelisted kind
        if self.allowed_kinds.contains(&event.kind) {
            println!("This looks fine! (kind={})", event.kind);
            reply = nauthz_grpc::EventReply {
                decision: Decision::Permit as i32,
                message: None,
            };
        } else {
            println!("Blocked! (kind={})", event.kind);
            reply = nauthz_grpc::EventReply {
                decision: Decision::Deny as i32,
                message: Some(format!("kind {} not permitted", event.kind)),
            };
        }
        Ok(Response::new(reply))
    }
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let addr = "[::1]:50051".parse().unwrap();

    // A simple authorization engine that allows kinds 0-3
    let checker = EventAuthz {
        allowed_kinds: vec![0, 1, 2, 3],
    };
    println!("EventAuthz Server listening on {}", addr);
    // Start serving
    Server::builder()
        .add_service(AuthorizationServer::new(checker))
        .serve(addr)
        .await?;
    Ok(())
}
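To try the example against a local relay, it can be run directly with Cargo (this assumes the checkout layout above, where `proto/nauthz.proto` sits two directories up from the example):

```console
$ cd examples/nauthz
$ cargo run
EventAuthz Server listening on [::1]:50051
```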
@@ -1,3 +0,0 @@
#!/usr/bin/env bash
sed -E 's/@sha256:[[:alnum:]]+//g' Dockerfile > Dockerfile.any-platform
echo "Created platform-agnostic Dockerfile in 'Dockerfile.any-platform'"
60 proto/nauthz.proto (new file)

@@ -0,0 +1,60 @@
syntax = "proto3";

// Nostr Authorization Services
package nauthz;

// Authorization for actions against a relay
service Authorization {
  // Determine if an event should be admitted to the relay
  rpc EventAdmit(EventRequest) returns (EventReply) {}
}

message Event {
  bytes id = 1;               // 32-byte SHA256 hash of serialized event
  bytes pubkey = 2;           // 32-byte public key of event creator
  fixed64 created_at = 3;     // UNIX timestamp provided by event creator
  uint64 kind = 4;            // event kind
  string content = 5;         // arbitrary event contents
  repeated TagEntry tags = 6; // event tag array
  bytes sig = 7;              // 64-byte signature of the event id
  // Individual values for a single tag
  message TagEntry {
    repeated string values = 1;
  }
}

// Event data and metadata for authorization decisions
message EventRequest {
  Event event = 1;                // the event to be admitted for further relay processing
  optional string ip_addr = 2;    // IP address of the client that submitted the event
  optional string origin = 3;     // HTTP origin header from the client, if one exists
  optional string user_agent = 4; // HTTP user-agent header from the client, if one exists
  optional bytes auth_pubkey = 5; // the public key associated with a NIP-42 AUTH'd session, if authentication occurred
  optional Nip05Name nip05 = 6;   // NIP-05 address associated with the event pubkey, if it is known and has been validated by the relay
  // A NIP-05 verification record
  message Nip05Name {
    string local = 1;
    string domain = 2;
  }
}

// A permit or deny decision
enum Decision {
  DECISION_UNSPECIFIED = 0;
  DECISION_PERMIT = 1; // Admit this event for further processing
  DECISION_DENY = 2;   // Deny persisting or propagating this event
}

// Response to an event authorization request
message EventReply {
  Decision decision = 1;       // decision to enforce
  optional string message = 2; // informative message for the client
}
@@ -1 +1,4 @@
edition = "2021"
#max_width = 140
#chain_width = 100
#fn_call_width = 100
175 src/bin/bulkloader.rs (new file)

@@ -0,0 +1,175 @@
use std::io;
use std::path::Path;
use nostr_rs_relay::utils::is_lower_hex;
use tracing::info;
use nostr_rs_relay::config;
use nostr_rs_relay::event::{Event, single_char_tagname};
use nostr_rs_relay::error::{Error, Result};
use nostr_rs_relay::repo::sqlite::{PooledConnection, build_pool};
use nostr_rs_relay::repo::sqlite_migration::{curr_db_version, DB_VERSION};
use rusqlite::{OpenFlags, Transaction};
use std::sync::mpsc;
use std::thread;
use rusqlite::params;

/// Bulk load JSONL data from STDIN to the database specified in config.toml (or ./nostr.db as a default).
/// The database must already exist, this will not create a new one.
/// Tested against schema v13.
pub fn main() -> Result<()> {
    let _trace_sub = tracing_subscriber::fmt::try_init();
    println!("Nostr-rs-relay Bulk Loader");
    // check for a database file, or create one.
    let settings = config::Settings::new(&None);
    if !Path::new(&settings.database.data_directory).is_dir() {
        info!("Database directory does not exist");
        return Err(Error::DatabaseDirError);
    }
    // Get a database pool
    let pool = build_pool(
        "bulk-loader",
        &settings,
        OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
        1,
        4,
        false,
    );
    {
        // check for database schema version
        let mut conn: PooledConnection = pool.get()?;
        let version = curr_db_version(&mut conn)?;
        info!("current version is: {:?}", version);
        // ensure the schema version is current.
        if version != DB_VERSION {
            info!("version is not current, exiting");
            panic!("cannot write to schema other than v{DB_VERSION}");
        }
    }
    // this channel will contain parsed events ready to be inserted
    let (event_tx, event_rx) = mpsc::sync_channel(100_000);
    // Thread for reading events
    let _stdin_reader_handler = thread::spawn(move || {
        let stdin = io::stdin();
        for readline in stdin.lines() {
            if let Ok(line) = readline {
                // try to parse a nostr event
                let eres: Result<Event, serde_json::Error> = serde_json::from_str(&line);
                if let Ok(mut e) = eres {
                    if let Ok(()) = e.validate() {
                        e.build_index();
                        //debug!("Event: {:?}", e);
                        event_tx.send(Some(e)).ok();
                    } else {
                        info!("could not validate event");
                    }
                } else {
                    info!("error reading event: {:?}", eres);
                }
            } else {
                // error reading
                info!("error reading: {:?}", readline);
            }
        }
        info!("finished parsing events");
        event_tx.send(None).ok();
        let ok: Result<()> = Ok(());
        ok
    });
    let mut conn: PooledConnection = pool.get()?;
    let mut events_read = 0;
    let event_batch_size = 50_000;
    let mut new_events = 0;
    let mut has_more_events = true;
    while has_more_events {
        // begin a transaction
        let tx = conn.transaction()?;
        // read in batch_size events and commit
        for _ in 0..event_batch_size {
            match event_rx.recv() {
                Ok(Some(e)) => {
                    events_read += 1;
                    // ignore ephemeral events
                    if !(e.kind >= 20000 && e.kind < 30000) {
                        match write_event(&tx, e) {
                            Ok(c) => {
                                new_events += c;
                            },
                            Err(e) => {
                                info!("error inserting event: {:?}", e);
                            }
                        }
                    }
                },
                Ok(None) => {
                    // signal that the sender will never produce more events
                    has_more_events = false;
                    break;
                },
                Err(_) => {
                    info!("sender is closed");
                    // sender is done
                }
            }
        }
        info!("committed {} events...", new_events);
        tx.commit()?;
        conn.execute_batch("pragma wal_checkpoint(truncate)")?;
    }
    info!("processed {} events", events_read);
    info!("stored {} new events", new_events);
    // get a connection for writing events
    // read standard in.
    info!("finished reading input");
    Ok(())
}

/// Write an event and update the tag table.
/// Assumes the event has its index built.
fn write_event(tx: &Transaction, e: Event) -> Result<usize> {
    let id_blob = hex::decode(&e.id).ok();
    let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok();
    let delegator_blob: Option<Vec<u8>> = e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
    let event_str = serde_json::to_string(&e).ok();
    // ignore if the event hash is a duplicate.
    let ins_count = tx.execute(
        "INSERT OR IGNORE INTO event (event_hash, created_at, kind, author, delegated_by, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, ?6, strftime('%s','now'), FALSE);",
        params![id_blob, e.created_at, e.kind, pubkey_blob, delegator_blob, event_str]
    )?;
    if ins_count == 0 {
        return Ok(0);
    }
    // we want to capture the event_id that had the tag, the tag name, and the tag hex value.
    let event_id = tx.last_insert_rowid();
    // look at each event, and each tag, creating new tag entries if appropriate.
    for t in e.tags.iter().filter(|x| x.len() > 1) {
        let tagname = t.get(0).unwrap();
        let tagnamechar_opt = single_char_tagname(tagname);
        if tagnamechar_opt.is_none() {
            continue;
        }
        // safe because len was > 1
        let tagval = t.get(1).unwrap();
        // insert as BLOB if we can restore it losslessly.
        // this means it needs to be even length and lowercase.
        if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
            tx.execute(
                "INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
                params![event_id, tagname, hex::decode(tagval).ok()],
            )?;
        } else {
            // otherwise, insert as text
            tx.execute(
                "INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
                params![event_id, tagname, &tagval],
            )?;
        }
    }
    if e.is_replaceable() {
        //let query = "SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1;";
        //let count: usize = tx.query_row(query, params![e.kind, pubkey_blob], |row| row.get(0))?;
        //info!("found {} rows that /would/ be preserved", count);
        match tx.execute(
            "DELETE FROM event WHERE kind=? and author=? and id NOT IN (SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1);",
            params![e.kind, pubkey_blob, e.kind, pubkey_blob],
        ) {
            Ok(_) => {},
            Err(x) => {info!("error deleting replaceable event: {:?}", x);}
        }
    }
    Ok(ins_count)
}
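A usage sketch for the loader (the binary name follows from the `src/bin/` path; `events.jsonl` is a hypothetical file with one JSON event per line):

```console
$ cargo build --release --bin bulkloader
$ ./target/release/bulkloader < events.jsonl
```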
20 src/cli.rs (new file)

@@ -0,0 +1,20 @@
use clap::Parser;

#[derive(Parser)]
#[command(about = "A nostr relay written in Rust", author = env!("CARGO_PKG_AUTHORS"), version = env!("CARGO_PKG_VERSION"))]
pub struct CLIArgs {
    #[arg(
        short,
        long,
        help = "Use the <directory> as the location of the database",
        required = false,
    )]
    pub db: Option<String>,
    #[arg(
        short,
        long,
        help = "Use the <file name> as the location of the config file",
        required = false,
    )]
    pub config: Option<String>,
}
@@ -12,15 +12,24 @@ pub struct Info {
    pub description: Option<String>,
    pub pubkey: Option<String>,
    pub contact: Option<String>,
    pub favicon: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Database {
    pub data_directory: String,
    pub engine: String,
    pub in_memory: bool,
    pub min_conn: u32,
    pub max_conn: u32,
    pub connection: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Grpc {
    pub event_admission_server: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]

@@ -52,17 +61,22 @@ pub struct Retention {
#[allow(unused)]
pub struct Limits {
    pub messages_per_sec: Option<u32>, // Artificially slow down event writing to limit disk consumption (averaged over 1 minute)
    pub subscriptions_per_min: Option<u32>, // Artificially slow down request (db query) creation to prevent abuse (averaged over 1 minute)
    pub db_conns_per_client: Option<u32>, // How many concurrent database queries (not subscriptions) may a client have?
    pub max_blocking_threads: usize,
    pub max_event_bytes: Option<usize>, // Maximum size of an EVENT message
    pub max_ws_message_bytes: Option<usize>,
    pub max_ws_frame_bytes: Option<usize>,
    pub broadcast_buffer: usize, // events to buffer for subscribers (prevents slow readers from consuming memory)
    pub event_persist_buffer: usize, // events to buffer for database commits (block senders if database writes are too slow)
    pub event_kind_blacklist: Option<Vec<u64>>
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Authorization {
    pub pubkey_whitelist: Option<Vec<String>>, // If present, only allow these pubkeys to publish events
    pub nip42_auth: bool, // if true enables NIP-42 authentication
}

#[derive(Debug, Clone, Serialize, Deserialize)]

@@ -139,6 +153,7 @@ pub struct Settings {
    pub info: Info,
    pub diagnostics: Diagnostics,
    pub database: Database,
    pub grpc: Grpc,
    pub network: Network,
    pub limits: Limits,
    pub authorization: Authorization,

@@ -149,10 +164,10 @@ pub struct Settings {

impl Settings {
    #[must_use]
    pub fn new() -> Self {
    pub fn new(config_file_name: &Option<String>) -> Self {
        let default_settings = Self::default();
        // attempt to construct settings with file
        let from_file = Self::new_from_default(&default_settings);
        let from_file = Self::new_from_default(&default_settings, config_file_name);
        match from_file {
            Ok(f) => f,
            Err(e) => {

@@ -162,13 +177,19 @@ impl Settings {
        }
    }

    fn new_from_default(default: &Settings) -> Result<Self, ConfigError> {
    fn new_from_default(default: &Settings, config_file_name: &Option<String>) -> Result<Self, ConfigError> {
        let default_config_file_name = "config.toml".to_string();
        let config: &String = match config_file_name {
            Some(value) => value,
            None => &default_config_file_name
        };
        let builder = Config::builder();
        let config: Config = builder
            // use defaults
            .add_source(Config::try_from(default)?)
            // override with file contents
            .add_source(File::with_name("config.toml"))
            .add_source(File::with_name(config))
            .build()?;
        let mut settings: Settings = config.try_deserialize()?;
        // ensure connection pool size is logical

@@ -198,13 +219,19 @@ impl Default for Settings {
            description: None,
            pubkey: None,
            contact: None,
            favicon: None,
        },
        diagnostics: Diagnostics { tracing: false },
        database: Database {
            data_directory: ".".to_owned(),
            engine: "sqlite".to_owned(),
            in_memory: false,
            min_conn: 4,
            max_conn: 128,
            max_conn: 8,
            connection: "".to_owned(),
        },
        grpc: Grpc {
            event_admission_server: None,
        },
        network: Network {
            port: 8080,

@@ -214,14 +241,19 @@ impl Default for Settings {
        },
        limits: Limits {
            messages_per_sec: None,
            subscriptions_per_min: None,
            db_conns_per_client: None,
            max_blocking_threads: 16,
            max_event_bytes: Some(2 << 17), // 128K
            max_ws_message_bytes: Some(2 << 17), // 128K
            max_ws_frame_bytes: Some(2 << 17), // 128K
            broadcast_buffer: 16384,
            event_persist_buffer: 4096,
            event_kind_blacklist: None,
        },
        authorization: Authorization {
            pubkey_whitelist: None, // Allow any address to publish
            nip42_auth: false, // Disable NIP-42 authentication
        },
        verified_users: VerifiedUsers {
            mode: VerifiedUsersMode::Disabled,
139 src/conn.rs

@@ -1,26 +1,42 @@
//! Client connection state
use std::collections::HashMap;

use tracing::{debug, trace};
use uuid::Uuid;

use crate::close::Close;
use crate::conn::Nip42AuthState::{AuthPubkey, Challenge, NoAuth};
use crate::error::Error;
use crate::error::Result;
use crate::event::Event;
use crate::subscription::Subscription;
use std::collections::HashMap;
use tracing::{debug, info};
use uuid::Uuid;
use crate::utils::{host_str, unix_time};

/// A subscription identifier has a maximum length
const MAX_SUBSCRIPTION_ID_LEN: usize = 256;

/// NIP-42 authentication state
pub enum Nip42AuthState {
    /// The client is not authenticated yet
    NoAuth,
    /// The AUTH challenge sent
    Challenge(String),
    /// The client is authenticated
    AuthPubkey(String),
}

/// State for a client connection
pub struct ClientConn {
    /// Client IP (either from socket, or configured proxy header)
    client_ip: String,
    client_ip_addr: String,
    /// Unique client identifier generated at connection time
    client_id: Uuid,
    /// The current set of active client subscriptions
    subscriptions: HashMap<String, Subscription>,
    /// Per-connection maximum concurrent subscriptions
    max_subs: usize,
    /// NIP-42 AUTH
    auth: Nip42AuthState,
}

impl Default for ClientConn {

@@ -32,20 +48,28 @@ impl Default for ClientConn {
impl ClientConn {
    /// Create a new, empty connection state.
    #[must_use]
    pub fn new(client_ip: String) -> Self {
    pub fn new(client_ip_addr: String) -> Self {
        let client_id = Uuid::new_v4();
        ClientConn {
            client_ip,
            client_ip_addr,
            client_id,
            subscriptions: HashMap::new(),
            max_subs: 32,
            auth: NoAuth,
        }
    }

    #[must_use]
    pub fn subscriptions(&self) -> &HashMap<String, Subscription> {
        &self.subscriptions
    }

    /// Check if the given subscription already exists
    #[must_use]
    pub fn has_subscription(&self, sub: &Subscription) -> bool {
        self.subscriptions.values().any(|x| x == sub)
    }

    /// Get a short prefix of the client's unique identifier, suitable
    /// for logging.
    #[must_use]

@@ -55,7 +79,23 @@ impl ClientConn {

    #[must_use]
    pub fn ip(&self) -> &str {
        &self.client_ip
        &self.client_ip_addr
    }

    #[must_use]
    pub fn auth_pubkey(&self) -> Option<&String> {
        match &self.auth {
            AuthPubkey(pubkey) => Some(pubkey),
            _ => None,
        }
    }

    #[must_use]
    pub fn auth_challenge(&self) -> Option<&String> {
        match &self.auth {
            Challenge(challenge) => Some(challenge),
            _ => None,
        }
    }

    /// Add a new subscription for this connection.

@@ -69,7 +109,7 @@ impl ClientConn {
        // prevent arbitrarily long subscription identifiers from
        // being used.
        if sub_id_len > MAX_SUBSCRIPTION_ID_LEN {
            info!(
            debug!(
                "ignoring sub request with excessive length: ({})",
                sub_id_len
            );

@@ -79,7 +119,7 @@ impl ClientConn {
        if self.subscriptions.contains_key(&k) {
            self.subscriptions.remove(&k);
            self.subscriptions.insert(k, s.clone());
            debug!(
            trace!(
                "replaced existing subscription (cid: {}, sub: {:?})",
                self.get_client_prefix(),
                s.get_id()

@@ -93,7 +133,7 @@ impl ClientConn {
        }
        // add subscription
        self.subscriptions.insert(k, s);
        debug!(
        trace!(
            "registered new subscription, currently have {} active subs (cid: {})",
            self.subscriptions.len(),
            self.get_client_prefix(),

@@ -105,10 +145,85 @@ impl ClientConn {
    pub fn unsubscribe(&mut self, c: &Close) {
        // TODO: return notice if subscription did not exist.
        self.subscriptions.remove(&c.id);
        debug!(
        trace!(
            "removed subscription, currently have {} active subs (cid: {})",
            self.subscriptions.len(),
            self.get_client_prefix(),
        );
    }

    pub fn generate_auth_challenge(&mut self) {
        self.auth = Challenge(Uuid::new_v4().to_string());
    }

    pub fn authenticate(&mut self, event: &Event, relay_url: &String) -> Result<()> {
        match &self.auth {
            Challenge(_) => (),
            AuthPubkey(_) => {
                // already authenticated
                return Ok(())
            },
            NoAuth => {
                // unexpected AUTH request
                return Err(Error::AuthFailure);
            },
        }
        match event.validate() {
            Ok(_) => {
                if event.kind != 22242 {
                    return Err(Error::AuthFailure);
                }

                let curr_time = unix_time();
                let past_cutoff = curr_time - 600; // 10 minutes
                let future_cutoff = curr_time + 600; // 10 minutes
                if event.created_at < past_cutoff || event.created_at > future_cutoff {
                    return Err(Error::AuthFailure);
                }

                let mut challenge: Option<&String> = None;
                let mut relay: Option<&String> = None;

                for tag in &event.tags {
                    if tag.len() == 2 && tag.get(0) == Some(&"challenge".into()) {
                        challenge = tag.get(1);
                    }
                    if tag.len() == 2 && tag.get(0) == Some(&"relay".into()) {
                        relay = tag.get(1);
                    }
                }

                match (challenge, &self.auth) {
                    (Some(received_challenge), Challenge(sent_challenge)) => {
                        if received_challenge != sent_challenge {
                            return Err(Error::AuthFailure);
                        }
                    }
                    (_, _) => {
                        return Err(Error::AuthFailure);
                    }
                }

                match (relay.and_then(|url| host_str(url)), host_str(relay_url)) {
                    (Some(received_relay), Some(our_relay)) => {
                        if received_relay != our_relay {
                            return Err(Error::AuthFailure);
                        }
                    }
                    (_, _) => {
                        return Err(Error::AuthFailure);
                    }
                }

                self.auth = AuthPubkey(event.pubkey.clone());
                trace!(
                    "authenticated pubkey {} (cid: {})",
                    event.pubkey.chars().take(8).collect::<String>(),
                    self.get_client_prefix()
                );
                Ok(())
            }
            Err(_) => Err(Error::AuthFailure),
        }
    }
}
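For reference, the client-side `AUTH` message that `authenticate` validates carries a kind `22242` event whose `challenge` tag must echo the server's challenge and whose `relay` tag must share a host with the configured relay URL. A sketch with illustrative values (per NIP-42; not taken verbatim from this codebase):

```json
["AUTH", {
  "kind": 22242,
  "created_at": 1675000000,
  "tags": [["relay", "wss://relay.example.com/"],
           ["challenge", "3395b6ba-6bf8-4f41-9b64-6881de1c94bf"]],
  "pubkey": "<hex pubkey>", "id": "<event id>", "sig": "<hex sig>", "content": ""
}]
```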
895
src/db.rs
895
src/db.rs
@@ -1,30 +1,23 @@
|
||||
//! Event persistence and querying
|
||||
//use crate::config::SETTINGS;
|
||||
use crate::config::Settings;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::event::{single_char_tagname, Event};
|
||||
use crate::hexrange::hex_range;
|
||||
use crate::hexrange::HexSearch;
|
||||
use crate::nip05;
|
||||
use crate::event::Event;
|
||||
use crate::notice::Notice;
|
||||
use crate::schema::{upgrade_db, STARTUP_SQL};
|
||||
use crate::subscription::ReqFilter;
|
||||
use crate::subscription::Subscription;
|
||||
use crate::utils::{is_hex, is_lower_hex};
|
||||
use crate::server::NostrMetrics;
|
||||
use crate::nauthz;
|
||||
use governor::clock::Clock;
|
||||
use governor::{Quota, RateLimiter};
|
||||
use hex;
|
||||
use r2d2;
|
||||
use r2d2_sqlite::SqliteConnectionManager;
|
||||
use rusqlite::params;
|
||||
use rusqlite::types::ToSql;
|
||||
use rusqlite::OpenFlags;
|
||||
use std::fmt::Write as _;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
use tokio::task;
|
||||
use sqlx::pool::PoolOptions;
|
||||
use sqlx::postgres::PgConnectOptions;
|
||||
use sqlx::ConnectOptions;
|
||||
use crate::repo::sqlite::SqliteRepo;
|
||||
use crate::repo::postgres::{PostgresRepo,PostgresPool};
|
||||
use crate::repo::NostrRepo;
|
||||
use std::time::{Instant, Duration};
|
||||
use tracing::log::LevelFilter;
|
||||
use tracing::{debug, info, trace, warn};
|
||||
|
||||
pub type SqlitePool = r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>;
|
||||
@@ -34,369 +27,308 @@ pub type PooledConnection = r2d2::PooledConnection<r2d2_sqlite::SqliteConnection
pub struct SubmittedEvent {
pub event: Event,
pub notice_tx: tokio::sync::mpsc::Sender<Notice>,
pub source_ip: String,
pub origin: Option<String>,
pub user_agent: Option<String>,
pub auth_pubkey: Option<Vec<u8>>,
}

/// Database file
pub const DB_FILE: &str = "nostr.db";

/// Build a database connection pool.
/// Build repo
/// # Panics
///
/// Will panic if the pool could not be created.
#[must_use]
pub fn build_pool(
name: &str,
settings: &Settings,
flags: OpenFlags,
min_size: u32,
max_size: u32,
wait_for_db: bool,
) -> SqlitePool {
let db_dir = &settings.database.data_directory;
let full_path = Path::new(db_dir).join(DB_FILE);
// small hack; if the database doesn't exist yet, that means the
// writer thread hasn't finished. Give it a chance to work. This
// is only an issue the first time we run.
if !settings.database.in_memory {
while !full_path.exists() && wait_for_db {
debug!("Database reader pool is waiting on the database to be created...");
thread::sleep(Duration::from_millis(500));
}
pub async fn build_repo(settings: &Settings, metrics: NostrMetrics) -> Arc<dyn NostrRepo> {
match settings.database.engine.as_str() {
"sqlite" => {Arc::new(build_sqlite_pool(settings, metrics).await)},
"postgres" => {Arc::new(build_postgres_pool(settings, metrics).await)},
_ => panic!("Unknown database engine"),
}
let manager = if settings.database.in_memory {
SqliteConnectionManager::memory()
.with_flags(flags)
.with_init(|c| c.execute_batch(STARTUP_SQL))
} else {
SqliteConnectionManager::file(&full_path)
.with_flags(flags)
.with_init(|c| c.execute_batch(STARTUP_SQL))
};
let pool: SqlitePool = r2d2::Pool::builder()
.test_on_check_out(true) // no noticeable performance hit
.min_idle(Some(min_size))
.max_size(max_size)
.build(manager)
.unwrap();
info!(
"Built a connection pool {:?} (min={}, max={})",
name, min_size, max_size
);
pool
}

/// Spawn a database writer that persists events to the SQLite store.
async fn build_sqlite_pool(settings: &Settings, metrics: NostrMetrics) -> SqliteRepo {
let repo = SqliteRepo::new(settings, metrics);
repo.start().await.ok();
repo.migrate_up().await.ok();
repo
}

async fn build_postgres_pool(settings: &Settings, metrics: NostrMetrics) -> PostgresRepo {
let mut options: PgConnectOptions = settings.database.connection.as_str().parse().unwrap();
options.log_statements(LevelFilter::Debug);
options.log_slow_statements(LevelFilter::Warn, Duration::from_secs(60));

let pool: PostgresPool = PoolOptions::new()
.max_connections(settings.database.max_conn)
.min_connections(settings.database.min_conn)
.idle_timeout(Duration::from_secs(60))
.connect_with(options)
.await
.unwrap();
let repo = PostgresRepo::new(pool, metrics);
// Panic on migration failure
let version = repo.migrate_up().await.unwrap();
info!("Postgres migration completed, at v{}", version);
// startup scheduled tasks
repo.start().await.ok();
repo
}
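// Usage sketch (illustrative, not part of the diff): the new build_repo entry
// point selects a backend from config. `settings` and `metrics` are assumed to
// come from the caller's startup code; an unknown engine string panics.
let repo: Arc<dyn NostrRepo> = build_repo(&settings, metrics).await;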

/// Spawn a database writer that persists events to the `SQLite` store.
pub async fn db_writer(
repo: Arc<dyn NostrRepo>,
settings: Settings,
mut event_rx: tokio::sync::mpsc::Receiver<SubmittedEvent>,
bcast_tx: tokio::sync::broadcast::Sender<Event>,
metadata_tx: tokio::sync::broadcast::Sender<Event>,
mut shutdown: tokio::sync::broadcast::Receiver<()>,
) -> tokio::task::JoinHandle<Result<()>> {
) -> Result<()> {
// are we performing NIP-05 checking?
let nip05_active = settings.verified_users.is_active();
// are we requiring NIP-05 user verification?
let nip05_enabled = settings.verified_users.is_enabled();

task::spawn_blocking(move || {
let db_dir = &settings.database.data_directory;
let full_path = Path::new(db_dir).join(DB_FILE);
// create a connection pool
let pool = build_pool(
"event writer",
&settings,
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
1,
4,
false,
);
if settings.database.in_memory {
info!("using in-memory database, this will not persist a restart!");
//upgrade_db(&mut pool.get()?)?;

// Make a copy of the whitelist
let whitelist = &settings.authorization.pubkey_whitelist.clone();

// get rate limit settings
let rps_setting = settings.limits.messages_per_sec;
let mut most_recent_rate_limit = Instant::now();
let mut lim_opt = None;
let clock = governor::clock::QuantaClock::default();
if let Some(rps) = rps_setting {
if rps > 0 {
info!("Enabling rate limits for event creation ({}/sec)", rps);
let quota = core::num::NonZeroU32::new(rps * 60).unwrap();
lim_opt = Some(RateLimiter::direct(Quota::per_minute(quota)));
}
}
// create a client if GRPC is enabled.
// Check with externalized event admitter service, if one is defined.
let mut grpc_client = if let Some(svr) = settings.grpc.event_admission_server {
Some(nauthz::EventAuthzService::connect(&svr).await)
} else {
None
};

//let gprc_client = settings.grpc.event_admission_server.map(|s| {
// event_admitter_connect(&s);
// });

loop {
if shutdown.try_recv().is_ok() {
info!("shutting down database writer");
break;
}
// call blocking read on channel
let next_event = event_rx.recv().await;
// if the channel has closed, we will never get work
if next_event.is_none() {
break;
}
// track if an event write occurred; this is used to
// update the rate limiter
let mut event_write = false;
let subm_event = next_event.unwrap();
let event = subm_event.event;
let notice_tx = subm_event.notice_tx;
// check if this event is authorized.
if let Some(allowed_addrs) = whitelist {
// TODO: incorporate delegated pubkeys
// if the event address is not in allowed_addrs.
if !allowed_addrs.contains(&event.pubkey) {
debug!(
"rejecting event: {}, unauthorized author",
event.get_event_id_prefix()
);
notice_tx
.try_send(Notice::blocked(
event.id,
"pubkey is not allowed to publish to this relay",
))
.ok();
continue;
}
}

// Check that event kind isn't blacklisted
let kinds_blacklist = &settings.limits.event_kind_blacklist.clone();
if let Some(event_kind_blacklist) = kinds_blacklist {
if event_kind_blacklist.contains(&event.kind) {
debug!(
"rejecting event: {}, blacklisted kind: {}",
&event.get_event_id_prefix(),
&event.kind
);
notice_tx
.try_send(Notice::blocked(
event.id,
"event kind is blocked by relay"
))
.ok();
continue;
}
}

// send any metadata events to the NIP-05 verifier
if nip05_active && event.is_kind_metadata() {
// we are sending this prior to even deciding if we
// persist it. this allows the nip05 module to
// inspect it, update if necessary, or persist a new
// event and broadcast it itself.
metadata_tx.send(event.clone()).ok();
}

// get a validation result for use in verification and GRPC
let validation = if nip05_active {
Some(repo.get_latest_user_verification(&event.pubkey).await)
} else {
info!("opened database {:?} for writing", full_path);
}
upgrade_db(&mut pool.get()?)?;
None
};

// Make a copy of the whitelist
let whitelist = &settings.authorization.pubkey_whitelist.clone();
// check for NIP-05 verification
if nip05_enabled && validation.is_some() {
match validation.as_ref().unwrap() {
Ok(uv) => {
if uv.is_valid(&settings.verified_users) {
info!(
"new event from verified author ({:?},{:?})",
uv.name.to_string(),
event.get_author_prefix()
);

// get rate limit settings
let rps_setting = settings.limits.messages_per_sec;
let mut most_recent_rate_limit = Instant::now();
let mut lim_opt = None;
let clock = governor::clock::QuantaClock::default();
if let Some(rps) = rps_setting {
if rps > 0 {
info!("Enabling rate limits for event creation ({}/sec)", rps);
let quota = core::num::NonZeroU32::new(rps * 60).unwrap();
lim_opt = Some(RateLimiter::direct(Quota::per_minute(quota)));
}
}
loop {
if shutdown.try_recv().is_ok() {
info!("shutting down database writer");
break;
}
// call blocking read on channel
let next_event = event_rx.blocking_recv();
// if the channel has closed, we will never get work
if next_event.is_none() {
break;
}
// track if an event write occurred; this is used to
// update the rate limiter
let mut event_write = false;
let subm_event = next_event.unwrap();
let event = subm_event.event;
let notice_tx = subm_event.notice_tx;
// check if this event is authorized.
if let Some(allowed_addrs) = whitelist {
// TODO: incorporate delegated pubkeys
// if the event address is not in allowed_addrs.
if !allowed_addrs.contains(&event.pubkey) {
info!(
"Rejecting event {}, unauthorized author",
event.get_event_id_prefix()
);
notice_tx
.try_send(Notice::blocked(
event.id,
"pubkey is not allowed to publish to this relay",
))
.ok();
continue;
}
}

// send any metadata events to the NIP-05 verifier
if nip05_active && event.is_kind_metadata() {
// we are sending this prior to even deciding if we
// persist it. this allows the nip05 module to
// inspect it, update if necessary, or persist a new
// event and broadcast it itself.
metadata_tx.send(event.clone()).ok();
}

// check for NIP-05 verification
if nip05_enabled {
match nip05::query_latest_user_verification(pool.get()?, event.pubkey.to_owned()) {
Ok(uv) => {
if uv.is_valid(&settings.verified_users) {
info!(
"new event from verified author ({:?},{:?})",
uv.name.to_string(),
event.get_author_prefix()
);
} else {
info!("rejecting event, author ({:?} / {:?}) verification invalid (expired/wrong domain)",
uv.name.to_string(),
event.get_author_prefix()
);
notice_tx
.try_send(Notice::blocked(
event.id,
"NIP-05 verification is no longer valid (expired/wrong domain)",
))
.ok();
continue;
}
}
Err(Error::SqlError(rusqlite::Error::QueryReturnedNoRows)) => {
debug!(
"no verification records found for pubkey: {:?}",
} else {
info!(
"rejecting event, author ({:?} / {:?}) verification invalid (expired/wrong domain)",
uv.name.to_string(),
event.get_author_prefix()
);
notice_tx
.try_send(Notice::blocked(
event.id,
"NIP-05 verification needed to publish events",
"NIP-05 verification is no longer valid (expired/wrong domain)",
))
.ok();
continue;
}
Err(e) => {
warn!("checking nip05 verification status failed: {:?}", e);
continue;
}
}
}
// TODO: cache recent list of authors to remove a DB call.
let start = Instant::now();
if event.kind >= 20000 && event.kind < 30000 {
bcast_tx.send(event.clone()).ok();
info!(
"published ephemeral event: {:?} from: {:?} in: {:?}",
event.get_event_id_prefix(),
event.get_author_prefix(),
start.elapsed()
);
event_write = true
} else {
match write_event(&mut pool.get()?, &event) {
Ok(updated) => {
if updated == 0 {
trace!("ignoring duplicate or deleted event");
notice_tx.try_send(Notice::duplicate(event.id)).ok();
} else {
info!(
"persisted event: {:?} from: {:?} in: {:?}",
event.get_event_id_prefix(),
event.get_author_prefix(),
start.elapsed()
);
event_write = true;
// send this out to all clients
bcast_tx.send(event.clone()).ok();
notice_tx.try_send(Notice::saved(event.id)).ok();
}
}
Err(err) => {
warn!("event insert failed: {:?}", err);
let msg = "relay experienced an error trying to publish the latest event";
notice_tx.try_send(Notice::error(event.id, msg)).ok();
}
Err(Error::SqlError(rusqlite::Error::QueryReturnedNoRows)) => {
debug!(
"no verification records found for pubkey: {:?}",
event.get_author_prefix()
);
notice_tx
.try_send(Notice::blocked(
event.id,
"NIP-05 verification needed to publish events",
))
.ok();
continue;
}
}

// use rate limit, if defined, and if an event was actually written.
if event_write {
if let Some(ref lim) = lim_opt {
if let Err(n) = lim.check() {
let wait_for = n.wait_time_from(clock.now());
// check if we have recently logged rate
// limits, but print out a message only once
// every 10 seconds.
if most_recent_rate_limit.elapsed().as_secs() > 10 {
warn!(
"rate limit reached for event creation (sleep for {:?}) (suppressing future messages for 10 seconds)",
wait_for
);
// reset last rate limit message
most_recent_rate_limit = Instant::now();
}
// block event writes, allowing them to queue up
thread::sleep(wait_for);
continue;
}
Err(e) => {
warn!("checking nip05 verification status failed: {:?}", e);
continue;
}
}
}
info!("database connection closed");
Ok(())
})
}

/// Persist an event to the database, returning rows added.
pub fn write_event(conn: &mut PooledConnection, e: &Event) -> Result<usize> {
// start transaction
let tx = conn.transaction()?;
// get relevant fields from event and convert to blobs.
let id_blob = hex::decode(&e.id).ok();
let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok();
let delegator_blob: Option<Vec<u8>> = e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
let event_str = serde_json::to_string(&e).ok();
// ignore if the event hash is a duplicate.
let mut ins_count = tx.execute(
"INSERT OR IGNORE INTO event (event_hash, created_at, kind, author, delegated_by, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, ?6, strftime('%s','now'), FALSE);",
params![id_blob, e.created_at, e.kind, pubkey_blob, delegator_blob, event_str]
)?;
if ins_count == 0 {
// if the event was a duplicate, no need to insert event or
// pubkey references. This will abort the txn.
return Ok(ins_count);
}
// remember primary key of the event most recently inserted.
let ev_id = tx.last_insert_rowid();
// add all tags to the tag table
for tag in e.tags.iter() {
// ensure we have 2 values.
if tag.len() >= 2 {
let tagname = &tag[0];
let tagval = &tag[1];
// only single-char tags are searchable
let tagchar_opt = single_char_tagname(tagname);
match &tagchar_opt {
Some(_) => {
// if tagvalue is lowercase hex;
if is_lower_hex(tagval) && (tagval.len() % 2 == 0) {
tx.execute(
"INSERT OR IGNORE INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3)",
params![ev_id, &tagname, hex::decode(tagval).ok()],
)?;
// nip05 address
let nip05_address : Option<crate::nip05::Nip05Name> = validation.and_then(|x| x.ok().map(|y| y.name));

// GRPC check
if let Some(ref mut c) = grpc_client {
trace!("checking if grpc permits");
let grpc_start = Instant::now();
let decision_res = c.admit_event(&event, &subm_event.source_ip, subm_event.origin, subm_event.user_agent, nip05_address, subm_event.auth_pubkey).await;
match decision_res {
Ok(decision) => {
if !decision.permitted() {
// GRPC returned a decision to reject this event
info!("GRPC rejected event: {:?} (kind: {}) from: {:?} in: {:?} (IP: {:?})",
event.get_event_id_prefix(),
event.kind,
event.get_author_prefix(),
grpc_start.elapsed(),
subm_event.source_ip);
notice_tx.try_send(Notice::blocked(event.id, &decision.message().unwrap_or_else(|| "".to_string()))).ok();
continue;
}
},
Err(e) => {
warn!("GRPC server error: {:?}", e);
}
}
}

// TODO: cache recent list of authors to remove a DB call.
let start = Instant::now();
if event.is_ephemeral() {
bcast_tx.send(event.clone()).ok();
debug!(
"published ephemeral event: {:?} from: {:?} in: {:?}",
event.get_event_id_prefix(),
event.get_author_prefix(),
start.elapsed()
);
event_write = true;
} else {
match repo.write_event(&event).await {
Ok(updated) => {
if updated == 0 {
trace!("ignoring duplicate or deleted event");
notice_tx.try_send(Notice::duplicate(event.id)).ok();
} else {
tx.execute(
"INSERT OR IGNORE INTO tag (event_id, name, value) VALUES (?1, ?2, ?3)",
params![ev_id, &tagname, &tagval],
)?;
info!(
"persisted event: {:?} (kind: {}) from: {:?} in: {:?} (IP: {:?})",
event.get_event_id_prefix(),
event.kind,
event.get_author_prefix(),
start.elapsed(),
subm_event.source_ip,
);
event_write = true;
// send this out to all clients
bcast_tx.send(event.clone()).ok();
notice_tx.try_send(Notice::saved(event.id)).ok();
}
}
None => {}
Err(err) => {
warn!("event insert failed: {:?}", err);
let msg = "relay experienced an error trying to publish the latest event";
notice_tx.try_send(Notice::error(event.id, msg)).ok();
}
}
}

// use rate limit, if defined, and if an event was actually written.
if event_write {
if let Some(ref lim) = lim_opt {
if let Err(n) = lim.check() {
let wait_for = n.wait_time_from(clock.now());
// check if we have recently logged rate
// limits, but print out a message only once
// every 10 seconds.
if most_recent_rate_limit.elapsed().as_secs() > 10 {
warn!(
"rate limit reached for event creation (sleep for {:?}) (suppressing future messages for 10 seconds)",
wait_for
);
// reset last rate limit message
most_recent_rate_limit = Instant::now();
}
// block event writes, allowing them to queue up
thread::sleep(wait_for);
continue;
}
}
}
}
// if this event is replaceable update, hide every other replaceable
// event with the same kind from the same author that was issued
// earlier than this.
if e.kind == 0 || e.kind == 3 || (e.kind >= 10000 && e.kind < 20000) {
let update_count = tx.execute(
"UPDATE event SET hidden=TRUE WHERE id!=? AND kind=? AND author=? AND created_at <= ? and hidden!=TRUE",
params![ev_id, e.kind, hex::decode(&e.pubkey).ok(), e.created_at],
)?;
if update_count > 0 {
info!(
"hid {} older replaceable kind {} events for author: {:?}",
update_count,
e.kind,
e.get_author_prefix()
);
}
}
// if this event is a deletion, hide the referenced events from the same author.
if e.kind == 5 {
let event_candidates = e.tag_values_by_name("e");
// first parameter will be author
let mut params: Vec<Box<dyn ToSql>> = vec![Box::new(hex::decode(&e.pubkey)?)];
event_candidates
.iter()
.filter(|x| is_hex(x) && x.len() == 64)
.filter_map(|x| hex::decode(x).ok())
.for_each(|x| params.push(Box::new(x)));
let query = format!(
"UPDATE event SET hidden=TRUE WHERE kind!=5 AND author=? AND event_hash IN ({})",
repeat_vars(params.len() - 1)
);
let mut stmt = tx.prepare(&query)?;
let update_count = stmt.execute(rusqlite::params_from_iter(params))?;
info!(
"hid {} deleted events for author {:?}",
update_count,
e.get_author_prefix()
);
} else {
// check if a deletion has already been recorded for this event.
// Only relevant for non-deletion events
let del_count = tx.query_row(
"SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.author=? AND t.name='e' AND e.kind=5 AND t.value_hex=? LIMIT 1;",
params![pubkey_blob, id_blob], |row| row.get::<usize, usize>(0));
// check if the query returned a result, meaning we should
// hide the current event
if del_count.ok().is_some() {
// a deletion already existed, mark original event as hidden.
info!(
"hid event: {:?} due to existing deletion by author: {:?}",
e.get_event_id_prefix(),
e.get_author_prefix()
);
let _update_count =
tx.execute("UPDATE event SET hidden=TRUE WHERE id=?", params![ev_id])?;
// event was deleted, so let caller know nothing new
// arrived, preventing this from being sent to active
// subscriptions
ins_count = 0;
}
}
tx.commit()?;
Ok(ins_count)
info!("database connection closed");
Ok(())
}
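// Reference sketch (names from this file; surrounding error handling elided):
// driving the SQLite write path with a pooled connection. A return of 0 rows
// means the event was a duplicate or already deleted.
let mut conn = pool.get()?;
match write_event(&mut conn, &event)? {
    0 => trace!("duplicate or deleted event ignored"),
    n => info!("stored event ({} row(s))", n),
}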

/// Serialized event associated with a specific subscription request.
@@ -407,294 +339,3 @@ pub struct QueryResult {
/// Serialized event
pub event: String,
}

/// Produce an arbitrary list of '?' parameters.
fn repeat_vars(count: usize) -> String {
if count == 0 {
return "".to_owned();
}
let mut s = "?,".repeat(count);
// Remove trailing comma
s.pop();
s
}
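// Example of the placeholder helper above:
assert_eq!(repeat_vars(0), "");
assert_eq!(repeat_vars(3), "?,?,?");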

/// Create a dynamic SQL subquery and params from a subscription filter.
fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>) {
// build a dynamic SQL query. all user-input is either an integer
// (sqli-safe), or a string that is filtered to only contain
// hexadecimal characters. Strings that require escaping (tag
// names/values) use parameters.

// if the filter is malformed, don't return anything.
if f.force_no_match {
let empty_query =
"SELECT DISTINCT(e.content), e.created_at FROM event e WHERE 1=0".to_owned();
// query parameters for SQLite
let empty_params: Vec<Box<dyn ToSql>> = vec![];
return (empty_query, empty_params);
}

let mut query = "SELECT DISTINCT(e.content), e.created_at FROM event e".to_owned();
// query parameters for SQLite
let mut params: Vec<Box<dyn ToSql>> = vec![];

// individual filter components (single conditions such as an author or event ID)
let mut filter_components: Vec<String> = Vec::new();
// Query for "authors", allowing prefix matches
if let Some(authvec) = &f.authors {
// take each author and convert to a hexsearch
let mut auth_searches: Vec<String> = vec![];
for auth in authvec {
match hex_range(auth) {
Some(HexSearch::Exact(ex)) => {
auth_searches.push("author=? OR delegated_by=?".to_owned());
params.push(Box::new(ex.clone()));
params.push(Box::new(ex));
}
Some(HexSearch::Range(lower, upper)) => {
auth_searches.push(
"(author>? AND author<?) OR (delegated_by>? AND delegated_by<?)".to_owned(),
);
params.push(Box::new(lower.clone()));
params.push(Box::new(upper.clone()));
params.push(Box::new(lower));
params.push(Box::new(upper));
}
Some(HexSearch::LowerOnly(lower)) => {
auth_searches.push("author>? OR delegated_by>?".to_owned());
params.push(Box::new(lower.clone()));
params.push(Box::new(lower));
}
None => {
info!("Could not parse hex range from author {:?}", auth);
}
}
}
if !authvec.is_empty() {
let authors_clause = format!("({})", auth_searches.join(" OR "));
filter_components.push(authors_clause);
} else {
// if the authors list was empty, we should never return
// any results.
filter_components.push("false".to_owned());
}
}
// Query for Kind
if let Some(ks) = &f.kinds {
// kind is number, no escaping needed
let str_kinds: Vec<String> = ks.iter().map(|x| x.to_string()).collect();
let kind_clause = format!("kind IN ({})", str_kinds.join(", "));
filter_components.push(kind_clause);
}
// Query for event, allowing prefix matches
if let Some(idvec) = &f.ids {
// take each event id and convert to a hexsearch
let mut id_searches: Vec<String> = vec![];
for id in idvec {
match hex_range(id) {
Some(HexSearch::Exact(ex)) => {
id_searches.push("event_hash=?".to_owned());
params.push(Box::new(ex));
}
Some(HexSearch::Range(lower, upper)) => {
id_searches.push("(event_hash>? AND event_hash<?)".to_owned());
params.push(Box::new(lower));
params.push(Box::new(upper));
}
Some(HexSearch::LowerOnly(lower)) => {
id_searches.push("event_hash>?".to_owned());
params.push(Box::new(lower));
}
None => {
info!("Could not parse hex range from id {:?}", id);
}
}
}
if !idvec.is_empty() {
let id_clause = format!("({})", id_searches.join(" OR "));
filter_components.push(id_clause);
} else {
// if the ids list was empty, we should never return
// any results.
filter_components.push("false".to_owned());
}
}
// Query for tags
if let Some(map) = &f.tags {
for (key, val) in map.iter() {
let mut str_vals: Vec<Box<dyn ToSql>> = vec![];
let mut blob_vals: Vec<Box<dyn ToSql>> = vec![];
for v in val {
if (v.len() % 2 == 0) && is_lower_hex(v) {
if let Ok(h) = hex::decode(v) {
blob_vals.push(Box::new(h));
}
} else {
str_vals.push(Box::new(v.to_owned()));
}
}
// create clauses with "?" params for each tag value being searched
let str_clause = format!("value IN ({})", repeat_vars(str_vals.len()));
let blob_clause = format!("value_hex IN ({})", repeat_vars(blob_vals.len()));
// find evidence of the target tag name/value existing for this event.
let tag_clause = format!("e.id IN (SELECT e.id FROM event e LEFT JOIN tag t on e.id=t.event_id WHERE hidden!=TRUE and (name=? AND ({} OR {})))", str_clause, blob_clause);
// add the tag name as the first parameter
params.push(Box::new(key.to_string()));
// add all tag values that are plain strings as params
params.append(&mut str_vals);
// add all tag values that are blobs as params
params.append(&mut blob_vals);
filter_components.push(tag_clause);
}
}
// Query for timestamp
if f.since.is_some() {
let created_clause = format!("created_at > {}", f.since.unwrap());
filter_components.push(created_clause);
}
// Query for timestamp
if f.until.is_some() {
let until_clause = format!("created_at < {}", f.until.unwrap());
filter_components.push(until_clause);
}
// never display hidden events
query.push_str(" WHERE hidden!=TRUE");
// build filter component conditions
if !filter_components.is_empty() {
query.push_str(" AND ");
query.push_str(&filter_components.join(" AND "));
}
// Apply per-filter limit to this subquery.
// The use of a LIMIT implies a DESC order, to capture only the most recent events.
if let Some(lim) = f.limit {
let _ = write!(query, " ORDER BY e.created_at DESC LIMIT {}", lim);
} else {
query.push_str(" ORDER BY e.created_at ASC")
}
(query, params)
}
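// Illustrative only: a ReqFilter with kinds=[1], since=1665867123, and no
// limit yields roughly the following SQL (no bound parameters in this case):
// "SELECT DISTINCT(e.content), e.created_at FROM event e WHERE hidden!=TRUE
//  AND kind IN (1) AND created_at > 1665867123 ORDER BY e.created_at ASC"
let (sql, params) = query_from_filter(&filter);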

/// Create a dynamic SQL query string and params from a subscription.
fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
// build a dynamic SQL query for an entire subscription, based on
// SQL subqueries for filters.
let mut subqueries: Vec<String> = Vec::new();
// subquery params
let mut params: Vec<Box<dyn ToSql>> = vec![];
// for every filter in the subscription, generate a subquery
for f in sub.filters.iter() {
let (f_subquery, mut f_params) = query_from_filter(f);
subqueries.push(f_subquery);
params.append(&mut f_params);
}
// encapsulate subqueries into select statements
let subqueries_selects: Vec<String> = subqueries
.iter()
.map(|s| format!("SELECT content, created_at FROM ({})", s))
.collect();
let query: String = subqueries_selects.join(" UNION ");
(query, params)
}
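// Sketch: a two-filter subscription becomes the UNION of two wrapped subqueries:
//   SELECT content, created_at FROM (<filter 1 subquery>)
//   UNION SELECT content, created_at FROM (<filter 2 subquery>)
let (sql, params) = query_from_sub(&subscription);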

fn log_pool_stats(pool: &SqlitePool) {
let state: r2d2::State = pool.state();
let in_use_cxns = state.connections - state.idle_connections;
debug!(
"DB pool usage (in_use: {}, available: {})",
in_use_cxns, state.connections
);
}

/// Perform a database query using a subscription.
///
/// The [`Subscription`] is converted into a SQL query. Each result
/// is published on the `query_tx` channel as it is returned. If a
/// message becomes available on the `abandon_query_rx` channel, the
/// query is immediately aborted.
pub async fn db_query(
sub: Subscription,
client_id: String,
pool: SqlitePool,
query_tx: tokio::sync::mpsc::Sender<QueryResult>,
mut abandon_query_rx: tokio::sync::oneshot::Receiver<()>,
) {
task::spawn_blocking(move || {
let mut row_count: usize = 0;
let start = Instant::now();
// generate SQL query
let (q, p) = query_from_sub(&sub);
trace!("SQL generated in {:?}", start.elapsed());
// show pool stats
log_pool_stats(&pool);
// cutoff for displaying slow queries
let slow_cutoff = Duration::from_millis(1000);
let start = Instant::now();
if let Ok(conn) = pool.get() {
// execute the query. Don't cache, since queries vary so much.
let mut stmt = conn.prepare(&q)?;
let mut event_rows = stmt.query(rusqlite::params_from_iter(p))?;
let mut first_result = true;
while let Some(row) = event_rows.next()? {
if first_result {
let first_result_elapsed = start.elapsed();
// logging for slow queries; show sub and SQL
if first_result_elapsed >= slow_cutoff {
info!(
"going to query for: {:?} (cid: {}, sub: {:?})",
sub, client_id, sub.id
);
info!(
"final query string (slow): {} (cid: {}, sub: {:?})",
q, client_id, sub.id
);
} else {
trace!(
"going to query for: {:?} (cid: {}, sub: {:?})",
sub,
client_id,
sub.id
);
trace!("final query string: {}", q);
}
debug!(
"first result in {:?} (cid: {}, sub: {:?})",
first_result_elapsed, client_id, sub.id
);
first_result = false;
}
// check if this is still active
// TODO: check every N rows
if abandon_query_rx.try_recv().is_ok() {
debug!("query aborted (cid: {}, sub: {:?})", client_id, sub.id);
return Ok(());
}
row_count += 1;
let event_json = row.get(0)?;
query_tx
.blocking_send(QueryResult {
sub_id: sub.get_id(),
event: event_json,
})
.ok();
}
query_tx
.blocking_send(QueryResult {
sub_id: sub.get_id(),
event: "EOSE".to_string(),
})
.ok();
debug!(
"query completed in {:?} (cid: {}, sub: {:?}, rows: {})",
start.elapsed(),
client_id,
sub.id,
row_count
);
} else {
warn!("Could not get a database connection for querying");
}
let ok: Result<()> = Ok(());
ok
});
}
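// Caller sketch (channel names assumed for illustration): results stream over
// query_tx as they are produced; sending on the oneshot aborts between rows.
let (query_tx, mut query_rx) = tokio::sync::mpsc::channel::<QueryResult>(256);
let (abandon_tx, abandon_rx) = tokio::sync::oneshot::channel::<()>();
db_query(sub, client_id, pool.clone(), query_tx, abandon_rx).await;
while let Some(res) = query_rx.recv().await {
    // res.event is serialized event JSON, or the "EOSE" sentinel at the end
}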

src/delegation.rs
@@ -80,11 +80,11 @@ impl FromStr for Operator

#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct ConditionQuery {
pub(crate) conditions: Vec<Condition>,
pub conditions: Vec<Condition>,
}

impl ConditionQuery {
pub fn allows_event(&self, event: &Event) -> bool {
#[must_use] pub fn allows_event(&self, event: &Event) -> bool {
// check each condition, to ensure that the event complies
// with the restriction.
for c in &self.conditions {
@@ -101,14 +101,14 @@ impl ConditionQuery {
}

// Verify that the delegator approved the delegation; return a ConditionQuery if so.
pub fn validate_delegation(
#[must_use] pub fn validate_delegation(
delegator: &str,
delegatee: &str,
cond_query: &str,
sigstr: &str,
) -> Option<ConditionQuery> {
// form the token
let tok = format!("nostr:delegation:{}:{}", delegatee, cond_query);
let tok = format!("nostr:delegation:{delegatee}:{cond_query}");
// form SHA256 hash
let digest: sha256::Hash = sha256::Hash::hash(tok.as_bytes());
let sig = schnorr::Signature::from_str(sigstr).unwrap();
@@ -133,18 +133,18 @@ pub fn validate_delegation(
}
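// For reference, the token hashed above follows the NIP-26 shape (placeholder
// values below, not real keys):
//   nostr:delegation:<delegatee-pubkey>:<conditions>
//   e.g. "nostr:delegation:<64-hex-pubkey>:kind=1&created_at<1665867123"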

/// Parsed delegation condition
/// see https://github.com/nostr-protocol/nips/pull/28#pullrequestreview-1084903800
/// An example complex condition would be: kind=1,2,3&created_at<1665265999
/// see <https://github.com/nostr-protocol/nips/pull/28#pullrequestreview-1084903800>
/// An example complex condition would be: `kind=1,2,3&created_at<1665265999`
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct Condition {
pub(crate) field: Field,
pub(crate) operator: Operator,
pub(crate) values: Vec<u64>,
pub field: Field,
pub operator: Operator,
pub values: Vec<u64>,
}

impl Condition {
/// Check if this condition allows the given event to be delegated
pub fn allows_event(&self, event: &Event) -> bool {
#[must_use] pub fn allows_event(&self, event: &Event) -> bool {
// determine what the right-hand side of the operator is
let resolved_field = match &self.field {
Field::Kind => event.kind,
@@ -323,7 +323,7 @@ mod tests {
Condition {
field: Field::CreatedAt,
operator: Operator::LessThan,
values: vec![1665867123],
values: vec![1_665_867_123],
},
],
};
@@ -332,19 +332,6 @@ mod tests {
assert_eq!(parsed, cq);
Ok(())
}
fn simple_event() -> Event {
Event {
id: "0".to_owned(),
pubkey: "0".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: vec![],
content: "".to_owned(),
sig: "0".to_owned(),
tagidx: None,
}
}
// Check for condition logic on event w/ empty values
#[test]
fn condition_with_empty_values() {
@@ -353,7 +340,7 @@ mod tests {
operator: Operator::GreaterThan,
values: vec![],
};
let e = simple_event();
let e = Event::simple_event();
assert!(!c.allows_event(&e));
c.operator = Operator::LessThan;
assert!(!c.allows_event(&e));
@@ -373,7 +360,7 @@ mod tests {
operator: Operator::GreaterThan,
values: vec![10],
};
let mut e = simple_event();
let mut e = Event::simple_event();
// kind is not greater than 10, not allowed
e.kind = 1;
assert!(!c.allows_event(&e));
@@ -392,7 +379,7 @@ mod tests {
operator: Operator::Equals,
values: vec![0, 10, 20],
};
let mut e = simple_event();
let mut e = Event::simple_event();
// Allow if event kind is in list for Equals
e.kind = 10;
assert!(c.allows_event(&e));

src/error.rs
@@ -48,6 +48,10 @@ pub enum Error {
DatabaseDirError,
#[error("Database Connection Pool Error")]
DatabasePoolError(r2d2::Error),
#[error("SQL error")]
SqlxError(sqlx::Error),
#[error("Database Connection Pool Error")]
SqlxDatabasePoolError(sqlx::Error),
#[error("Custom Error : {0}")]
CustomError(String),
#[error("Task join error")]
@@ -58,6 +62,16 @@ pub enum Error {
HexError(hex::FromHexError),
#[error("Delegation parse error")]
DelegationParseError,
#[error("Channel closed error")]
ChannelClosed,
#[error("Authz error")]
AuthzError,
#[error("Tonic GRPC error")]
TonicError(tonic::Status),
#[error("Invalid AUTH message")]
AuthFailure,
#[error("I/O Error")]
IoError(std::io::Error),
#[error("Unknown/Undocumented")]
UnknownError,
}
@@ -100,6 +114,12 @@ impl From<rusqlite::Error> for Error {
}
}

impl From<sqlx::Error> for Error {
fn from(d: sqlx::Error) -> Self {
Error::SqlxDatabasePoolError(d)
}
}

impl From<serde_json::Error> for Error {
/// Wrap JSON error
fn from(r: serde_json::Error) -> Self {
@@ -120,3 +140,16 @@ impl From<config::ConfigError> for Error {
Error::ConfigError(r)
}
}

impl From<tonic::Status> for Error {
/// Wrap Tonic error
fn from(r: tonic::Status) -> Self {
Error::TonicError(r)
}
}

impl From<std::io::Error> for Error {
fn from(r: std::io::Error) -> Self {
Error::IoError(r)
}
}
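// These From impls let call sites bubble foreign errors with `?`. A minimal
// hypothetical helper (not in this diff):
fn read_config_file(path: &str) -> Result<String> {
    // std::io::Error converts into Error::IoError via the impl above
    Ok(std::fs::read_to_string(path)?)
}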

src/event.rs
@@ -1,6 +1,6 @@
//! Event parsing and validation
use crate::delegation::validate_delegation;
use crate::error::Error::*;
use crate::error::Error::{CommandUnknownError, EventCouldNotCanonicalize, EventInvalidId, EventInvalidSignature, EventMalformedPubkey};
use crate::error::Result;
use crate::nip05;
use crate::utils::unix_time;
@@ -14,6 +14,8 @@ use std::collections::HashMap;
use std::collections::HashSet;
use std::str::FromStr;
use tracing::{debug, info};
use crate::event::EventWrapper::WrappedEvent;
use crate::event::EventWrapper::WrappedAuth;

lazy_static! {
/// Secp256k1 verification instance.
@@ -28,7 +30,7 @@ pub struct EventCmd {
}

impl EventCmd {
pub fn event_id(&self) -> &str {
#[must_use] pub fn event_id(&self) -> &str {
&self.event.id
}
}
@@ -37,19 +39,19 @@ impl EventCmd {
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
pub struct Event {
pub id: String,
pub(crate) pubkey: String,
pub pubkey: String,
#[serde(skip)]
pub(crate) delegated_by: Option<String>,
pub(crate) created_at: u64,
pub(crate) kind: u64,
pub delegated_by: Option<String>,
pub created_at: u64,
pub kind: u64,
#[serde(deserialize_with = "tag_from_string")]
// NOTE: array-of-arrays may need to be more general than a string container
pub(crate) tags: Vec<Vec<String>>,
pub(crate) content: String,
pub(crate) sig: String,
pub tags: Vec<Vec<String>>,
pub content: String,
pub sig: String,
// Optimization for tag search, built on demand.
#[serde(skip)]
pub(crate) tagidx: Option<HashMap<char, HashSet<String>>>,
pub tagidx: Option<HashMap<char, HashSet<String>>>,
}

/// Simple tag type for array of array of strings.
@@ -65,7 +67,7 @@ where
}

/// Attempt to form a single-char tag name.
pub fn single_char_tagname(tagname: &str) -> Option<char> {
#[must_use] pub fn single_char_tagname(tagname: &str) -> Option<char> {
// We return the tag character if and only if the tagname consists
// of a single char.
let mut tagnamechars = tagname.chars();
@@ -83,30 +85,112 @@ pub fn single_char_tagname(tagname: &str) -> Option<char> {
}
}

pub enum EventWrapper {
WrappedEvent(Event),
WrappedAuth(Event)
}

/// Convert network event to parsed/validated event.
impl From<EventCmd> for Result<Event> {
fn from(ec: EventCmd) -> Result<Event> {
impl From<EventCmd> for Result<EventWrapper> {
fn from(ec: EventCmd) -> Result<EventWrapper> {
// ensure command is correct
if ec.cmd != "EVENT" {
Err(CommandUnknownError)
} else {
if ec.cmd == "EVENT" {
ec.event.validate().map(|_| {
let mut e = ec.event;
e.build_index();
e.update_delegation();
e
WrappedEvent(e)
})
} else if ec.cmd == "AUTH" {
// we don't want to validate the event here, because NIP-42 can be disabled
// it will be validated later during the authentication process
Ok(WrappedAuth(ec.event))
} else {
Err(CommandUnknownError)
}
}
}
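// Dispatch sketch (assumed caller): the conversion above now yields either a
// fully validated event or an AUTH payload to be checked later under NIP-42.
let parsed: Result<EventWrapper> = event_cmd.into();
match parsed? {
    WrappedEvent(e) => { /* persist and broadcast as a normal event */ }
    WrappedAuth(auth) => { /* hand to the NIP-42 authentication handler */ }
}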

impl Event {
pub fn is_kind_metadata(&self) -> bool {
#[cfg(test)]
#[must_use] pub fn simple_event() -> Event {
Event {
id: "0".to_owned(),
pubkey: "0".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: vec![],
content: "".to_owned(),
sig: "0".to_owned(),
tagidx: None,
}
}

#[must_use] pub fn is_kind_metadata(&self) -> bool {
self.kind == 0
}

/// Should this event be persisted?
#[must_use] pub fn is_ephemeral(&self) -> bool {
self.kind >= 20000 && self.kind < 30000
}

/// Is this event currently expired?
pub fn is_expired(&self) -> bool {
if let Some(exp) = self.expiration() {
exp <= unix_time()
} else {
false
}
}

/// Determine the time at which this event should expire
pub fn expiration(&self) -> Option<u64> {
let default = "".to_string();
let dvals:Vec<&String> = self.tags
.iter()
.filter(|x| !x.is_empty())
.filter(|x| x.get(0).unwrap() == "expiration")
.map(|x| x.get(1).unwrap_or(&default)).take(1)
.collect();
let val_first = dvals.get(0);
val_first.and_then(|t| t.parse::<u64>().ok())
}

/// Should this event be replaced with newer timestamps from same author?
#[must_use] pub fn is_replaceable(&self) -> bool {
self.kind == 0 || self.kind == 3 || self.kind == 41 || (self.kind >= 10000 && self.kind < 20000)
}

/// Should this event be replaced with newer timestamps from same author, for distinct `d` tag values?
#[must_use] pub fn is_param_replaceable(&self) -> bool {
self.kind >= 30000 && self.kind < 40000
}

/// Return the value of the first `d` tag, for parameterized replaceable events.
#[must_use] pub fn distinct_param(&self) -> Option<String> {
if self.is_param_replaceable() {
let default = "".to_string();
let dvals:Vec<&String> = self.tags
.iter()
.filter(|x| !x.is_empty())
.filter(|x| x.get(0).unwrap() == "d")
.map(|x| x.get(1).unwrap_or(&default)).take(1)
.collect();
let dval_first = dvals.get(0);
match dval_first {
Some(_) => {dval_first.map(|x| x.to_string())},
None => Some(default)
}
} else {
None
}
}

/// Pull a NIP-05 Name out of the event, if one exists
pub fn get_nip05_addr(&self) -> Option<nip05::Nip05Name> {
#[must_use] pub fn get_nip05_addr(&self) -> Option<nip05::Nip05Name> {
if self.is_kind_metadata() {
// very quick check if we should attempt to parse this json
if self.content.contains("\"nip05\"") {
@@ -123,7 +207,7 @@ impl Event {
// is this event delegated (properly)?
// does the signature match, and are conditions valid?
// if so, return an alternate author for the event
pub fn delegated_author(&self) -> Option<String> {
#[must_use] pub fn delegated_author(&self) -> Option<String> {
// is there a delegation tag?
let delegation_tag: Vec<String> = self
.tags
@@ -131,8 +215,7 @@ impl Event {
.filter(|x| x.len() == 4)
.filter(|x| x.get(0).unwrap() == "delegation")
.take(1)
.next()?
.to_vec(); // get first tag
.next()?.clone(); // get first tag

//let delegation_tag = self.tag_values_by_name("delegation");
// delegation tags should have exactly 3 elements after the name (pubkey, condition, sig)
@@ -161,11 +244,11 @@ impl Event {
}

/// Update delegation status
fn update_delegation(&mut self) {
pub fn update_delegation(&mut self) {
self.delegated_by = self.delegated_author();
}
/// Build an event tag index
fn build_index(&mut self) {
pub fn build_index(&mut self) {
// if there are no tags; just leave the index as None
if self.tags.is_empty() {
return;
@@ -192,24 +275,24 @@ impl Event {
}

/// Create a short event identifier, suitable for logging.
pub fn get_event_id_prefix(&self) -> String {
#[must_use] pub fn get_event_id_prefix(&self) -> String {
self.id.chars().take(8).collect()
}
pub fn get_author_prefix(&self) -> String {
#[must_use] pub fn get_author_prefix(&self) -> String {
self.pubkey.chars().take(8).collect()
}

/// Retrieve tag initial values across all tags matching the name
pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
#[must_use] pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
self.tags
.iter()
.filter(|x| x.len() > 1)
.filter(|x| x.get(0).unwrap() == tag_name)
.map(|x| x.get(1).unwrap().to_owned())
.map(|x| x.get(1).unwrap().clone())
.collect()
}

pub fn is_valid_timestamp(&self, reject_future_seconds: Option<usize>) -> bool {
#[must_use] pub fn is_valid_timestamp(&self, reject_future_seconds: Option<usize>) -> bool {
if let Some(allowable_future) = reject_future_seconds {
let curr_time = unix_time();
// calculate difference, plus how far future we allow
@@ -226,7 +309,7 @@ impl Event {
}

/// Check if this event has a valid signature.
fn validate(&self) -> Result<()> {
pub fn validate(&self) -> Result<()> {
// TODO: return a Result with a reason for invalid events
// validation is performed by:
// * parsing JSON string into event fields
@@ -241,7 +324,7 @@ impl Event {
let c = c_opt.unwrap();
// * compute the sha256sum.
let digest: sha256::Hash = sha256::Hash::hash(c.as_bytes());
let hex_digest = format!("{:x}", digest);
let hex_digest = format!("{digest:x}");
// * ensure the id matches the computed sha256sum.
if self.id != hex_digest {
debug!("event id does not match digest");
@@ -264,14 +347,14 @@ impl Event {
}

/// Convert event to canonical representation for signing.
fn to_canonical(&self) -> Option<String> {
pub fn to_canonical(&self) -> Option<String> {
// create a JsonValue for each event element
let mut c: Vec<Value> = vec![];
// id must be set to 0
let id = Number::from(0_u64);
c.push(serde_json::Value::Number(id));
// public key
c.push(Value::String(self.pubkey.to_owned()));
c.push(Value::String(self.pubkey.clone()));
// creation time
let created_at = Number::from(self.created_at);
c.push(serde_json::Value::Number(created_at));
@@ -281,7 +364,7 @@ impl Event {
// tags
c.push(self.tags_to_canonical());
// content
c.push(Value::String(self.content.to_owned()));
c.push(Value::String(self.content.clone()));
serde_json::to_string(&Value::Array(c)).ok()
}
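// For reference (NIP-01): the canonical array built above is
//   [0, <pubkey>, <created_at>, <kind>, <tags>, <content>]
// and its SHA-256 digest must equal the event `id` checked in validate().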

@@ -289,11 +372,11 @@ impl Event {
fn tags_to_canonical(&self) -> Value {
let mut tags = Vec::<Value>::new();
// iterate over self tags,
for t in self.tags.iter() {
for t in &self.tags {
// each tag is a vec of strings
let mut a = Vec::<Value>::new();
for v in t.iter() {
a.push(serde_json::Value::String(v.to_owned()));
a.push(serde_json::Value::String(v.clone()));
}
tags.push(serde_json::Value::Array(a));
}
@@ -301,7 +384,7 @@ impl Event {
}

/// Determine if the given tag and value set intersect with tags in this event.
pub fn generic_tag_val_intersect(&self, tagname: char, check: &HashSet<String>) -> bool {
#[must_use] pub fn generic_tag_val_intersect(&self, tagname: char, check: &HashSet<String>) -> bool {
match &self.tagidx {
// check if this is indexable tagname
Some(idx) => match idx.get(&tagname) {
@@ -319,31 +402,18 @@ impl Event {
#[cfg(test)]
mod tests {
use super::*;
fn simple_event() -> Event {
Event {
id: "0".to_owned(),
pubkey: "0".to_owned(),
delegated_by: None,
created_at: 0,
kind: 0,
tags: vec![],
content: "".to_owned(),
sig: "0".to_owned(),
tagidx: None,
}
}

#[test]
fn event_creation() {
// create an event
let event = simple_event();
let event = Event::simple_event();
assert_eq!(event.id, "0");
}

#[test]
fn event_serialize() -> Result<()> {
// serialize an event to JSON string
let event = simple_event();
let event = Event::simple_event();
let j = serde_json::to_string(&event)?;
assert_eq!(j, "{\"id\":\"0\",\"pubkey\":\"0\",\"created_at\":0,\"kind\":0,\"tags\":[],\"content\":\"\",\"sig\":\"0\"}");
Ok(())
@@ -351,14 +421,14 @@ mod tests {

#[test]
fn empty_event_tag_match() {
let event = simple_event();
let event = Event::simple_event();
assert!(!event
.generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
.generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
}

#[test]
fn single_event_tag_match() {
let mut event = simple_event();
let mut event = Event::simple_event();
event.tags = vec![vec!["e".to_owned(), "foo".to_owned()]];
event.build_index();
assert_eq!(
@@ -373,7 +443,7 @@ mod tests {
#[test]
fn event_tags_serialize() -> Result<()> {
// serialize an event with tags to JSON string
let mut event = simple_event();
let mut event = Event::simple_event();
event.tags = vec![
vec![
"e".to_owned(),
@@ -406,7 +476,7 @@ mod tests {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501234,
created_at: 501_234,
kind: 1,
tags: vec![],
content: "this is a test".to_owned(),
@@ -424,7 +494,7 @@ mod tests {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501234,
created_at: 501_234,
kind: 1,
tags: vec![
vec!["j".to_owned(), "abc".to_owned()],
@@ -451,7 +521,7 @@ mod tests {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501234,
created_at: 501_234,
kind: 1,
tags: vec![
vec!["j".to_owned(), "abc".to_owned()],
@@ -478,7 +548,7 @@ mod tests {
id: "999".to_owned(),
pubkey: "012345".to_owned(),
delegated_by: None,
created_at: 501234,
created_at: 501_234,
kind: 1,
tags: vec![
vec!["#e".to_owned(), "aoeu".to_owned()],
@@ -497,4 +567,204 @@ mod tests {
let expected = Some(expected_json.to_owned());
assert_eq!(c, expected);
}
|
||||
|
||||
#[test]
|
||||
fn ephemeral_event() {
|
||||
let mut event = Event::simple_event();
|
||||
event.kind=20000;
|
||||
assert!(event.is_ephemeral());
|
||||
event.kind=29999;
|
||||
assert!(event.is_ephemeral());
|
||||
event.kind=30000;
|
||||
assert!(!event.is_ephemeral());
|
||||
event.kind=19999;
|
||||
assert!(!event.is_ephemeral());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn replaceable_event() {
|
||||
let mut event = Event::simple_event();
|
||||
event.kind=0;
|
||||
assert!(event.is_replaceable());
|
||||
event.kind=3;
|
||||
assert!(event.is_replaceable());
|
||||
event.kind=10000;
|
        assert!(event.is_replaceable());
        event.kind = 19999;
        assert!(event.is_replaceable());
        event.kind = 20000;
        assert!(!event.is_replaceable());
    }

    #[test]
    fn param_replaceable_event() {
        let mut event = Event::simple_event();
        event.kind = 30000;
        assert!(event.is_param_replaceable());
        event.kind = 39999;
        assert!(event.is_param_replaceable());
        event.kind = 29999;
        assert!(!event.is_param_replaceable());
        event.kind = 40000;
        assert!(!event.is_param_replaceable());
    }

    #[test]
    fn param_replaceable_value_case_1() {
        // NIP case #1: "tags":[["d",""]]
        let mut event = Event::simple_event();
        event.kind = 30000;
        event.tags = vec![vec!["d".to_owned(), "".to_owned()]];
        assert_eq!(event.distinct_param(), Some("".to_string()));
    }

    #[test]
    fn param_replaceable_value_case_2() {
        // NIP case #2: "tags":[]: implicit d tag with empty value
        let mut event = Event::simple_event();
        event.kind = 30000;
        assert_eq!(event.distinct_param(), Some("".to_string()));
    }

    #[test]
    fn param_replaceable_value_case_3() {
        // NIP case #3: "tags":[["d"]]: implicit empty value ""
        let mut event = Event::simple_event();
        event.kind = 30000;
        event.tags = vec![vec!["d".to_owned()]];
        assert_eq!(event.distinct_param(), Some("".to_string()));
    }

    #[test]
    fn param_replaceable_value_case_4() {
        // NIP case #4: "tags":[["d",""],["d","not empty"]]: only the first d tag is considered
        let mut event = Event::simple_event();
        event.kind = 30000;
        event.tags = vec![
            vec!["d".to_owned(), "".to_string()],
            vec!["d".to_owned(), "not empty".to_string()],
        ];
        assert_eq!(event.distinct_param(), Some("".to_string()));
    }

    #[test]
    fn param_replaceable_value_case_4b() {
        // Variation of #4 with the order reversed: "tags":[["d","not empty"],["d",""]]: only the first d tag is considered
        let mut event = Event::simple_event();
        event.kind = 30000;
        event.tags = vec![
            vec!["d".to_owned(), "not empty".to_string()],
            vec!["d".to_owned(), "".to_string()],
        ];
        assert_eq!(event.distinct_param(), Some("not empty".to_string()));
    }

    #[test]
    fn param_replaceable_value_case_5() {
        // NIP case #5: "tags":[["d"],["d","some value"]]: only the first d tag is considered
        let mut event = Event::simple_event();
        event.kind = 30000;
        event.tags = vec![
            vec!["d".to_owned()],
            vec!["d".to_owned(), "second value".to_string()],
            vec!["d".to_owned(), "third value".to_string()],
        ];
        assert_eq!(event.distinct_param(), Some("".to_string()));
    }

    #[test]
    fn param_replaceable_value_case_6() {
        // NIP case #6: "tags":[["e"]]: same as no tags
        let mut event = Event::simple_event();
        event.kind = 30000;
        event.tags = vec![vec!["e".to_owned()]];
        assert_eq!(event.distinct_param(), Some("".to_string()));
    }

    #[test]
    fn expiring_event_none() {
        // regular events do not expire
        let mut event = Event::simple_event();
        event.kind = 7;
        event.tags = vec![vec!["test".to_string(), "foo".to_string()]];
        assert_eq!(event.expiration(), None);
    }

    #[test]
    fn expiring_event_empty() {
        // an expiration tag with no value is ignored
        let mut event = Event::simple_event();
        event.kind = 7;
        event.tags = vec![vec!["expiration".to_string()]];
        assert_eq!(event.expiration(), None);
    }

    #[test]
    fn expiring_event_future() {
        // a normal expiring event
        let exp: u64 = 1676264138;
        let mut event = Event::simple_event();
        event.kind = 1;
        event.tags = vec![vec!["expiration".to_string(), exp.to_string()]];
        assert_eq!(event.expiration(), Some(exp));
    }

    #[test]
    fn expiring_event_negative() {
        // expiration set to a negative value (invalid)
        let exp: i64 = -90;
        let mut event = Event::simple_event();
        event.kind = 1;
        event.tags = vec![vec!["expiration".to_string(), exp.to_string()]];
        assert_eq!(event.expiration(), None);
    }

    #[test]
    fn expiring_event_zero() {
        // an expiring event set to the epoch (already expired, but valid)
        let exp: i64 = 0;
        let mut event = Event::simple_event();
        event.kind = 1;
        event.tags = vec![vec!["expiration".to_string(), exp.to_string()]];
        assert_eq!(event.expiration(), Some(0));
    }

    #[test]
    fn expiring_event_fraction() {
        // expiration is fractional (invalid)
        let exp: f64 = 23.334;
        let mut event = Event::simple_event();
        event.kind = 1;
        event.tags = vec![vec!["expiration".to_string(), exp.to_string()]];
        assert_eq!(event.expiration(), None);
    }

    #[test]
    fn expiring_event_multiple() {
        // multiple values; only the first is taken
        let mut event = Event::simple_event();
        event.kind = 1;
        event.tags = vec![
            vec!["expiration".to_string(), (10).to_string()],
            vec!["expiration".to_string(), (20).to_string()],
        ];
        assert_eq!(event.expiration(), Some(10));
    }
}
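The boundaries these tests pin down come from NIP-16 (replaceable events, kinds 10000 through 19999) and NIP-33 (parameterized replaceable events, kinds 30000 through 39999). As a reading aid, here is a minimal sketch of the predicates under test, assuming a pared-down Event with only the fields the tests touch; the real struct carries id, pubkey, sig, and more, and also treats kinds 0 and 3 as replaceable, which these particular tests do not exercise:

// Minimal sketch of the kind-range predicates exercised by the tests above.
struct Event {
    kind: u64,
    tags: Vec<Vec<String>>,
}

impl Event {
    // NIP-16 range (the full implementation also covers kinds 0 and 3).
    fn is_replaceable(&self) -> bool {
        self.kind == 0 || self.kind == 3 || (self.kind >= 10000 && self.kind < 20000)
    }

    // NIP-33 range.
    fn is_param_replaceable(&self) -> bool {
        self.kind >= 30000 && self.kind < 40000
    }

    // First "d" tag value; a missing or bare d tag counts as "".
    fn distinct_param(&self) -> Option<String> {
        if !self.is_param_replaceable() {
            return None;
        }
        let d = self
            .tags
            .iter()
            .find(|t| t.first().map(|n| n == "d").unwrap_or(false))
            .and_then(|t| t.get(1).cloned())
            .unwrap_or_default();
        Some(d)
    }
}

Note that distinct_param treats a missing or bare d tag as the empty string, which is why cases 2, 3, and 6 above all expect Some("").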
@@ -1,9 +1,9 @@
 //! Utilities for searching hexadecimal
-use crate::utils::is_hex;
+use crate::utils::{is_hex};
 use hex;
 
 /// Types of hexadecimal queries.
-#[derive(PartialEq, Eq, Debug, Clone)]
+#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)]
 pub enum HexSearch {
     // when no range is needed, exact 32-byte
     Exact(Vec<u8>),
@@ -19,16 +19,15 @@ fn is_all_fs(s: &str) -> bool {
 }
 
 /// Find the next hex sequence greater than the argument.
-pub fn hex_range(s: &str) -> Option<HexSearch> {
-    // handle special cases
-    if !is_hex(s) || s.len() > 64 {
+#[must_use] pub fn hex_range(s: &str) -> Option<HexSearch> {
+    let mut hash_base = s.to_owned();
+    if !is_hex(&hash_base) || hash_base.len() > 64 {
         return None;
     }
-    if s.len() == 64 {
-        return Some(HexSearch::Exact(hex::decode(s).ok()?));
+    if hash_base.len() == 64 {
+        return Some(HexSearch::Exact(hex::decode(&hash_base).ok()?));
     }
     // if s is odd, add a zero
-    let mut hash_base = s.to_owned();
     let mut odd = hash_base.len() % 2 != 0;
     if odd {
         // extend the string to make it even
@@ -57,8 +56,9 @@ pub fn hex_range(s: &str) -> Option<HexSearch> {
         } else if odd {
             // check if first char in this byte is NOT 'f'
             if b < 240 {
-                upper[byte_len] = b + 16; // bump up the first character in this byte
-                // increment done, stop iterating through the vec
+                // bump up the first character in this byte
+                upper[byte_len] = b + 16;
+                // increment done, stop iterating through the vec
                 break;
             }
             // if it is 'f', reset the byte to 0 and do a carry
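These range searches let short hex prefixes in subscription filters map onto indexed range scans instead of LIKE matches. Since this hunk elides the non-Exact variants of HexSearch, the sketch below only matches the variant shown and treats everything else generically; hex_range and HexSearch are assumed to be in scope:

// Hypothetical driver showing how a filter prefix maps onto a search type.
fn describe(prefix: &str) {
    match hex_range(prefix) {
        Some(HexSearch::Exact(bytes)) => {
            println!("exact 32-byte match: {}", hex::encode(bytes));
        }
        Some(other) => {
            // a shorter prefix yields bounds suitable for a range scan
            println!("range scan for prefix {}: {:?}", prefix, other);
        }
        None => println!("not hexadecimal, or longer than 64 chars"),
    }
}

fn main() {
    describe(&"a".repeat(64)); // full-length id: exact match
    describe("abc1");          // short prefix: range search
    describe("xyz");           // rejected: not hexadecimal
}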
src/info.rs  (19 changed lines)
@@ -1,6 +1,6 @@
 //! Relay metadata using NIP-11
 /// Relay Info
-use crate::config;
+use crate::config::Settings;
 use serde::{Deserialize, Serialize};
 
 pub const CARGO_PKG_VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
@@ -27,17 +27,26 @@ pub struct RelayInfo {
 }
 
 /// Convert an Info configuration into public Relay Info
-impl From<config::Info> for RelayInfo {
-    fn from(i: config::Info) -> Self {
+impl From<Settings> for RelayInfo {
+    fn from(c: Settings) -> Self {
+        let mut supported_nips = vec![1, 2, 9, 11, 12, 15, 16, 20, 22, 33, 40];
+
+        if c.authorization.nip42_auth {
+            supported_nips.push(42);
+            supported_nips.sort();
+        }
+
+        let i = c.info;
+
         RelayInfo {
             id: i.relay_url,
             name: i.name,
             description: i.description,
             pubkey: i.pubkey,
             contact: i.contact,
-            supported_nips: Some(vec![1, 2, 9, 11, 12, 15, 16, 20, 22, 26]),
+            supported_nips: Some(supported_nips),
             software: Some("https://git.sr.ht/~gheartsfield/nostr-rs-relay".to_owned()),
-            version: CARGO_PKG_VERSION.map(|x| x.to_owned()),
+            version: CARGO_PKG_VERSION.map(std::borrow::ToOwned::to_owned),
         }
    }
 }
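A small sketch of how the conversion above is typically consumed, assuming the Settings and RelayInfo types from this diff are in scope; clients retrieve the resulting document by requesting the relay URL with an Accept: application/nostr+json header:

// Hedged sketch: build the NIP-11 document from loaded settings.
fn relay_info_json(settings: Settings) -> serde_json::Result<String> {
    let info: RelayInfo = settings.into();
    serde_json::to_string_pretty(&info)
}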
@@ -1,3 +1,4 @@
+pub mod cli;
 pub mod close;
 pub mod config;
 pub mod conn;
@@ -8,8 +9,9 @@ pub mod event;
 pub mod hexrange;
 pub mod info;
 pub mod nip05;
+pub mod nauthz;
 pub mod notice;
-pub mod schema;
+pub mod repo;
 pub mod subscription;
 pub mod utils;
 // Public API for creating relays programmatically
src/main.rs  (57 changed lines)
@@ -1,50 +1,51 @@
 //! Server process
 
+use clap::Parser;
+use nostr_rs_relay::cli::CLIArgs;
 use nostr_rs_relay::config;
 use nostr_rs_relay::server::start_server;
-use std::env;
 use std::sync::mpsc as syncmpsc;
 use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
 use std::thread;
 use tracing::info;
 
+use console_subscriber::ConsoleLayer;
 
-/// Return a requested DB name from command line arguments.
-fn db_from_args(args: &[String]) -> Option<String> {
-    if args.len() == 3 && args.get(1) == Some(&"--db".to_owned()) {
-        return args.get(2).map(std::clone::Clone::clone);
-    }
-    None
-}
-
 /// Start running a Nostr relay server.
 fn main() {
-    // setup tracing
-    let _trace_sub = tracing_subscriber::fmt::try_init();
-    info!("Starting up from main");
-    // get database directory from args
-    let args: Vec<String> = env::args().collect();
-    let db_dir: Option<String> = db_from_args(&args);
-    // configure settings from config.toml
-    // replace default settings with those read from config.toml
-    let mut settings = config::Settings::new();
-    // update with database location
-    if let Some(db) = db_dir {
-        settings.database.data_directory = db;
-    }
+    let args = CLIArgs::parse();
+
+    // get config file name from args
+    let config_file_arg = args.config;
+
+    // configure settings from the config file (defaults to config.toml)
+    // replace default settings with those read from the config file
+    let mut settings = config::Settings::new(&config_file_arg);
+
+    // setup tracing
+    if settings.diagnostics.tracing {
+        // enable tracing with tokio-console
+        ConsoleLayer::builder().with_default_env().init();
+    } else {
+        // standard logging
+        tracing_subscriber::fmt::try_init().unwrap();
+    }
+    info!("Starting up from main");
+
+    // get database directory from args
+    let db_dir_arg = args.db;
+
+    // update with database location from args, if provided
+    if let Some(db_dir) = db_dir_arg {
+        settings.database.data_directory = db_dir;
+    }
     // we should have a 'control plane' channel to monitor and bump
     // the server. this will let us do stuff like clear the database,
     // shutdown, etc.; for now all this does is initiate shutdown if
     // `()` is sent. This will change in the future, this is just a
     // stopgap to shutdown the relay when it is used as a library.
     let (_, ctrl_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
     // run this in a new thread
-    let handle = thread::spawn(|| {
-        // we should have a 'control plane' channel to monitor and bump the server.
-        // this will let us do stuff like clear the database, shutdown, etc.
-        let _svr = start_server(settings, ctrl_rx);
+    let handle = thread::spawn(move || {
+        let _svr = start_server(&settings, ctrl_rx);
     });
     // block on nostr thread to finish.
     handle.join().unwrap();
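The CLIArgs type referenced here lives in src/cli.rs, which is not shown in this comparison. A plausible shape, inferred from the two fields main() reads (args.config and args.db); the actual flag names, shorthands, and defaults are assumptions:

use clap::Parser;

// Assumed definition, for illustration only; the real one is in src/cli.rs.
#[derive(Parser)]
#[command(about = "A Nostr relay written in Rust")]
pub struct CLIArgs {
    /// Location of the config file (falls back to config.toml when omitted)
    #[arg(short, long)]
    pub config: Option<String>,
    /// Directory to use for the database
    #[arg(short, long)]
    pub db: Option<String>,
}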
src/nauthz.rs  (new file, 111 lines)
@@ -0,0 +1,111 @@
use crate::error::{Error, Result};
use crate::{event::Event, nip05::Nip05Name};
use nauthz_grpc::authorization_client::AuthorizationClient;
use nauthz_grpc::event::TagEntry;
use nauthz_grpc::{Decision, Event as GrpcEvent, EventReply, EventRequest};
use tracing::{info, warn};

pub mod nauthz_grpc {
    tonic::include_proto!("nauthz");
}

// A decision for the DB to act upon
pub trait AuthzDecision: Send + Sync {
    fn permitted(&self) -> bool;
    fn message(&self) -> Option<String>;
}

impl AuthzDecision for EventReply {
    fn permitted(&self) -> bool {
        self.decision == Decision::Permit as i32
    }
    fn message(&self) -> Option<String> {
        self.message.clone()
    }
}

// A connection to an event admission GRPC server
pub struct EventAuthzService {
    server_addr: String,
    conn: Option<AuthorizationClient<tonic::transport::Channel>>,
}

// conversion of Nip05Names into GRPC type
impl std::convert::From<Nip05Name> for nauthz_grpc::event_request::Nip05Name {
    fn from(value: Nip05Name) -> Self {
        nauthz_grpc::event_request::Nip05Name {
            local: value.local.clone(),
            domain: value.domain.clone(),
        }
    }
}

// conversion of event tags into grpc struct
fn tags_to_protobuf(tags: &Vec<Vec<String>>) -> Vec<TagEntry> {
    tags.iter()
        .map(|x| TagEntry { values: x.clone() })
        .collect()
}

impl EventAuthzService {
    pub async fn connect(server_addr: &str) -> EventAuthzService {
        let mut eas = EventAuthzService {
            server_addr: server_addr.to_string(),
            conn: None,
        };
        eas.ready_connection().await;
        eas
    }

    pub async fn ready_connection(self: &mut Self) {
        if self.conn.is_none() {
            let client = AuthorizationClient::connect(self.server_addr.to_string()).await;
            if let Err(ref msg) = client {
                warn!("could not connect to nostr authz GRPC server: {:?}", msg);
            } else {
                info!("connected to nostr authorization GRPC server");
            }
            self.conn = client.ok();
        }
    }

    pub async fn admit_event(
        self: &mut Self,
        event: &Event,
        ip: &str,
        origin: Option<String>,
        user_agent: Option<String>,
        nip05: Option<Nip05Name>,
        auth_pubkey: Option<Vec<u8>>
    ) -> Result<Box<dyn AuthzDecision>> {
        self.ready_connection().await;
        let id_blob = hex::decode(&event.id)?;
        let pubkey_blob = hex::decode(&event.pubkey)?;
        let sig_blob = hex::decode(&event.sig)?;
        if let Some(ref mut c) = self.conn {
            let gevent = GrpcEvent {
                id: id_blob,
                pubkey: pubkey_blob,
                sig: sig_blob,
                created_at: event.created_at,
                kind: event.kind,
                content: event.content.clone(),
                tags: tags_to_protobuf(&event.tags),
            };
            let svr_res = c
                .event_admit(EventRequest {
                    event: Some(gevent),
                    ip_addr: Some(ip.to_string()),
                    origin,
                    user_agent,
                    auth_pubkey,
                    nip05: nip05.map(|x| nauthz_grpc::event_request::Nip05Name::from(x)),
                })
                .await?;
            let reply = svr_res.into_inner();
            return Ok(Box::new(reply));
        } else {
            return Err(Error::AuthzError);
        }
    }
}
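A hypothetical call site for the service above, showing the intended flow: ensure a connection, ask the external server for a decision, and treat a missing connection as an error via the AuthzError path. The gRPC address and client IP here are made up:

// Sketch only: the relay's real call site passes origin/user-agent/NIP-05
// metadata gathered from the connection.
async fn check_event(event: &Event) -> Result<bool> {
    let mut authz = EventAuthzService::connect("http://[::1]:50051").await;
    let decision = authz
        .admit_event(event, "203.0.113.5", None, None, None, None)
        .await?;
    if !decision.permitted() {
        info!("event denied: {:?}", decision.message());
    }
    Ok(decision.permitted())
}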
src/nip05.rs  (408 changed lines)
@@ -5,16 +5,14 @@
 //! consumes a stream of metadata events, and keeps a database table
 //! updated with the current NIP-05 verification status.
 use crate::config::VerifiedUsers;
-use crate::db;
 use crate::error::{Error, Result};
 use crate::event::Event;
-use crate::utils::unix_time;
+use crate::repo::NostrRepo;
+use std::sync::Arc;
 use hyper::body::HttpBody;
 use hyper::client::connect::HttpConnector;
 use hyper::Client;
 use hyper_tls::HttpsConnector;
-use rand::Rng;
-use rusqlite::params;
 use std::time::Duration;
 use std::time::Instant;
 use std::time::SystemTime;
@@ -23,14 +21,12 @@ use tracing::{debug, info, warn};
 
 /// NIP-05 verifier state
 pub struct Verifier {
+    /// Repository for saving/retrieving events and records
+    repo: Arc<dyn NostrRepo>,
     /// Metadata events for us to inspect
     metadata_rx: tokio::sync::broadcast::Receiver<Event>,
     /// Newly validated events get written and then broadcast on this channel to subscribers
     event_tx: tokio::sync::broadcast::Sender<Event>,
-    /// SQLite read query pool
-    read_pool: db::SqlitePool,
-    /// SQLite write query pool
-    write_pool: db::SqlitePool,
     /// Settings
     settings: crate::config::Settings,
     /// HTTP client
@@ -46,13 +42,13 @@ pub struct Verifier {
 /// A NIP-05 identifier is a local part and domain.
 #[derive(PartialEq, Eq, Debug, Clone)]
 pub struct Nip05Name {
-    local: String,
-    domain: String,
+    pub local: String,
+    pub domain: String,
 }
 
 impl Nip05Name {
     /// Does this name represent the entire domain?
-    pub fn is_domain_only(&self) -> bool {
+    #[must_use] pub fn is_domain_only(&self) -> bool {
         self.local == "_"
     }
@@ -62,8 +58,8 @@ impl Nip05Name {
             "https://{}/.well-known/nostr.json?name={}",
             self.domain, self.local
         )
-        .parse::<http::Uri>()
-        .ok()
+            .parse::<http::Uri>()
+            .ok()
     }
 }
@@ -73,16 +69,11 @@ impl std::convert::TryFrom<&str> for Nip05Name {
     fn try_from(inet: &str) -> Result<Self, Self::Error> {
         // break full name at the @ boundary.
         let components: Vec<&str> = inet.split('@').collect();
-        if components.len() != 2 {
-            Err(Error::CustomError("too many/few components".to_owned()))
-        } else {
+        if components.len() == 2 {
             // check if local name is valid
             let local = components[0];
             let domain = components[1];
-            if local
-                .chars()
-                .all(|x| x.is_alphanumeric() || x == '_' || x == '-' || x == '.')
-            {
+            if local.chars().all(|x| x.is_alphanumeric() || x == '_' || x == '-' || x == '.') {
                 if domain
                     .chars()
                     .all(|x| x.is_alphanumeric() || x == '-' || x == '.')
@@ -101,6 +92,8 @@ impl std::convert::TryFrom<&str> for Nip05Name {
                     "invalid character in local part".to_owned(),
                 ))
             }
+        } else {
+            Err(Error::CustomError("too many/few components".to_owned()))
         }
     }
 }
@@ -111,55 +104,30 @@ impl std::fmt::Display for Nip05Name {
     }
 }
 
-// Current time, with a slight foward jitter in seconds
-fn now_jitter(sec: u64) -> u64 {
-    // random time between now, and 10min in future.
-    let mut rng = rand::thread_rng();
-    let jitter_amount = rng.gen_range(0..sec);
-    let now = unix_time();
-    now.saturating_add(jitter_amount)
-}
-
 /// Check if the specified username and address are present and match in this response body
-fn body_contains_user(username: &str, address: &str, bytes: hyper::body::Bytes) -> Result<bool> {
+fn body_contains_user(username: &str, address: &str, bytes: &hyper::body::Bytes) -> Result<bool> {
     // convert the body into json
-    let body: serde_json::Value = serde_json::from_slice(&bytes)?;
+    let body: serde_json::Value = serde_json::from_slice(bytes)?;
     // ensure we have a names object.
     let names_map = body
         .as_object()
         .and_then(|x| x.get("names"))
-        .and_then(|x| x.as_object())
+        .and_then(serde_json::Value::as_object)
         .ok_or_else(|| Error::CustomError("not a map".to_owned()))?;
     // get the pubkey for the requested user
-    let check_name = names_map.get(username).and_then(|x| x.as_str());
+    let check_name = names_map.get(username).and_then(serde_json::Value::as_str);
     // ensure the address is a match
-    Ok(check_name.map(|x| x == address).unwrap_or(false))
+    Ok(check_name.map_or(false, |x| x == address))
 }
 
 impl Verifier {
     pub fn new(
+        repo: Arc<dyn NostrRepo>,
         metadata_rx: tokio::sync::broadcast::Receiver<Event>,
         event_tx: tokio::sync::broadcast::Sender<Event>,
         settings: crate::config::Settings,
     ) -> Result<Self> {
         info!("creating NIP-05 verifier");
-        // build a database connection for reading and writing.
-        let write_pool = db::build_pool(
-            "nip05 writer",
-            &settings,
-            rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE,
-            1, // min conns
-            4, // max conns
-            true, // wait for DB
-        );
-        let read_pool = db::build_pool(
-            "nip05 reader",
-            &settings,
-            rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY,
-            1, // min conns
-            8, // max conns
-            true, // wait for DB
-        );
         // setup hyper client
         let https = HttpsConnector::new();
         let client = Client::builder().build::<_, hyper::Body>(https);
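For reference, body_contains_user expects the standard .well-known/nostr.json shape from NIP-05. A test-style sketch against the new by-reference signature, using an illustrative payload (the pubkey is the example value from the NIP-05 specification):

#[test]
fn matches_wellknown_name() {
    // illustrative .well-known/nostr.json payload
    let doc = br#"{"names": {"bob": "b0635d6a9851d3aed0cd6c495b282167acf761729078d975fc341b22650b07b9"}}"#;
    let bytes = hyper::body::Bytes::from_static(doc);
    assert!(body_contains_user(
        "bob",
        "b0635d6a9851d3aed0cd6c495b282167acf761729078d975fc341b22650b07b9",
        &bytes
    )
    .unwrap());
    assert!(!body_contains_user("alice", "deadbeef", &bytes).unwrap());
}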
@@ -175,10 +143,9 @@ impl Verifier {
         // duration.
         let reverify_interval = tokio::time::interval(http_wait_duration);
         Ok(Verifier {
+            repo,
             metadata_rx,
             event_tx,
-            read_pool,
-            write_pool,
             settings,
             client,
             wait_after_finish,
@@ -246,44 +213,40 @@ impl Verifier {
 
         let response_fut = self.client.request(req);
 
         // HTTP request with timeout
-        match tokio::time::timeout(Duration::from_secs(5), response_fut).await {
-            Ok(response_res) => {
-                // limit size of verification document to 1MB.
-                const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024 * 1024;
-                let response = response_res?;
-                // determine content length from response
-                let response_content_length = match response.body().size_hint().upper() {
-                    Some(v) => v,
-                    None => MAX_ALLOWED_RESPONSE_SIZE + 1, // reject missing content length
-                };
-                // TODO: test how hyper handles the client providing an inaccurate content-length.
-                if response_content_length <= MAX_ALLOWED_RESPONSE_SIZE {
-                    let (parts, body) = response.into_parts();
-                    // TODO: consider redirects
-                    if parts.status == http::StatusCode::OK {
-                        // parse body, determine if the username / key / address is present
-                        let body_bytes = hyper::body::to_bytes(body).await?;
-                        let body_matches = body_contains_user(&nip.local, pubkey, body_bytes)?;
-                        if body_matches {
-                            return Ok(UserWebVerificationStatus::Verified);
-                        }
-                        // successful response, parsed as a nip-05
-                        // document, but this name/pubkey was not
-                        // present.
-                        return Ok(UserWebVerificationStatus::Unverified);
-                    }
-                } else {
-                    info!(
-                        "content length missing or exceeded limits for account: {:?}",
-                        nip.to_string()
-                    );
-                }
-            }
-            Err(_) => {
-                info!("timeout verifying account {:?}", nip);
-                return Ok(UserWebVerificationStatus::Unknown);
-            }
-        }
+        if let Ok(response_res) = tokio::time::timeout(Duration::from_secs(5), response_fut).await {
+            // limit size of verification document to 1MB.
+            const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024 * 1024;
+            let response = response_res?;
+            // determine content length from response
+            let response_content_length = match response.body().size_hint().upper() {
+                Some(v) => v,
+                None => MAX_ALLOWED_RESPONSE_SIZE + 1, // reject missing content length
+            };
+            // TODO: test how hyper handles the client providing an inaccurate content-length.
+            if response_content_length <= MAX_ALLOWED_RESPONSE_SIZE {
+                let (parts, body) = response.into_parts();
+                // TODO: consider redirects
+                if parts.status == http::StatusCode::OK {
+                    // parse body, determine if the username / key / address is present
+                    let body_bytes = hyper::body::to_bytes(body).await?;
+                    let body_matches = body_contains_user(&nip.local, pubkey, &body_bytes)?;
+                    if body_matches {
+                        return Ok(UserWebVerificationStatus::Verified);
+                    }
+                    // successful response, parsed as a nip-05
+                    // document, but this name/pubkey was not
+                    // present.
+                    return Ok(UserWebVerificationStatus::Unverified);
+                }
+            } else {
+                info!(
+                    "content length missing or exceeded limits for account: {:?}",
+                    nip.to_string()
+                );
+            }
+        } else {
+            info!("timeout verifying account {:?}", nip);
+            return Ok(UserWebVerificationStatus::Unknown);
+        }
         Ok(UserWebVerificationStatus::Unknown)
     }
@@ -294,8 +257,15 @@ impl Verifier {
         // run a loop, restarting on failure
         loop {
             let res = self.run_internal().await;
-            if let Err(e) = res {
-                info!("error in verifier: {:?}", e);
+            match res {
+                Err(Error::ChannelClosed) => {
+                    // channel was closed, we are shutting down
+                    return;
+                },
+                Err(e) => {
+                    info!("error in verifier: {:?}", e);
+                },
+                _ => {}
             }
         }
     }
@@ -309,7 +279,7 @@ impl Verifier {
             if let Some(naddr) = e.get_nip05_addr() {
                 info!("got metadata event for ({:?},{:?})", naddr.to_string(), e.get_author_prefix());
                 // Process a new author, checking if they are verified:
-                let check_verified = get_latest_user_verification(self.read_pool.get().expect("could not get connection"), &e.pubkey).await;
+                let check_verified = self.repo.get_latest_user_verification(&e.pubkey).await;
                 // ensure the event we got is more recent than the one we have, otherwise we can ignore it.
                 if let Ok(last_check) = check_verified {
                     if e.created_at <= last_check.event_created {
@@ -342,6 +312,7 @@ impl Verifier {
                 }
                 Err(tokio::sync::broadcast::error::RecvError::Closed) => {
                     info!("metadata broadcast channel closed");
+                    return Err(Error::ChannelClosed);
                 }
             }
         },
@@ -370,7 +341,7 @@ impl Verifier {
             .duration_since(SystemTime::UNIX_EPOCH)
             .map(|x| x.as_secs())
             .unwrap_or(0);
-        let vr = get_oldest_user_verification(self.read_pool.get()?, earliest_epoch).await;
+        let vr = self.repo.get_oldest_user_verification(earliest_epoch).await;
         match vr {
             Ok(ref v) => {
                 let new_status = self.get_web_verification(&v.name, &v.address).await;
@@ -378,34 +349,37 @@ impl Verifier {
                     UserWebVerificationStatus::Verified => {
                         // freshly verified account, update the
                         // timestamp.
-                        self.update_verification_record(self.write_pool.get()?, v)
+                        self.repo.update_verification_timestamp(v.rowid)
                             .await?;
+                        info!("verification updated for {}", v.to_string());
                     }
                     UserWebVerificationStatus::DomainNotAllowed
                     | UserWebVerificationStatus::Unknown => {
                         // server may be offline, or temporarily
                         // blocked by the config file. Note the
                         // failure so we can process something
                         // else.
                         // have we had enough failures to give up?
                         if v.failure_count >= max_failures as u64 {
                             info!(
                                 "giving up on verifying {:?} after {} failures",
                                 v.name, v.failure_count
                             );
-                            self.delete_verification_record(self.write_pool.get()?, v)
+                            self.repo.delete_verification(v.rowid)
                                 .await?;
                         } else {
                             // record normal failure, incrementing failure count
-                            self.fail_verification_record(self.write_pool.get()?, v)
-                                .await?;
+                            info!("verification failed for {}", v.to_string());
+                            self.repo.fail_verification(v.rowid).await?;
                         }
                     }
                     UserWebVerificationStatus::Unverified => {
                         // domain has removed the verification, drop
                         // the record on our side.
-                        self.delete_verification_record(self.write_pool.get()?, v)
+                        info!("verification rescinded for {}", v.to_string());
+                        self.repo.delete_verification(v.rowid)
                             .await?;
                     }
                 }
@@ -426,80 +400,6 @@ impl Verifier {
         Ok(())
     }
 
-    /// Reset the verification timestamp on a VerificationRecord
-    pub async fn update_verification_record(
-        &mut self,
-        mut conn: db::PooledConnection,
-        vr: &VerificationRecord,
-    ) -> Result<()> {
-        let vr_id = vr.rowid;
-        let vr_str = vr.to_string();
-        tokio::task::spawn_blocking(move || {
-            // add some jitter to the verification to prevent everything from stacking up together.
-            let verif_time = now_jitter(600);
-            let tx = conn.transaction()?;
-            {
-                // update verification time and reset any failure count
-                let query =
-                    "UPDATE user_verification SET verified_at=?, failure_count=0 WHERE id=?";
-                let mut stmt = tx.prepare(query)?;
-                stmt.execute(params![verif_time, vr_id])?;
-            }
-            tx.commit()?;
-            info!("verification updated for {}", vr_str);
-            let ok: Result<()> = Ok(());
-            ok
-        })
-        .await?
-    }
-    /// Reset the failure timestamp on a VerificationRecord
-    pub async fn fail_verification_record(
-        &mut self,
-        mut conn: db::PooledConnection,
-        vr: &VerificationRecord,
-    ) -> Result<()> {
-        let vr_id = vr.rowid;
-        let vr_str = vr.to_string();
-        let fail_count = vr.failure_count.saturating_add(1);
-        tokio::task::spawn_blocking(move || {
-            // add some jitter to the verification to prevent everything from stacking up together.
-            let fail_time = now_jitter(600);
-            let tx = conn.transaction()?;
-            {
-                let query = "UPDATE user_verification SET failed_at=?, failure_count=? WHERE id=?";
-                let mut stmt = tx.prepare(query)?;
-                stmt.execute(params![fail_time, fail_count, vr_id])?;
-            }
-            tx.commit()?;
-            info!("verification failed for {}", vr_str);
-            let ok: Result<()> = Ok(());
-            ok
-        })
-        .await?
-    }
-    /// Delete a VerificationRecord that is no longer valid
-    pub async fn delete_verification_record(
-        &mut self,
-        mut conn: db::PooledConnection,
-        vr: &VerificationRecord,
-    ) -> Result<()> {
-        let vr_id = vr.rowid;
-        let vr_str = vr.to_string();
-        tokio::task::spawn_blocking(move || {
-            let tx = conn.transaction()?;
-            {
-                let query = "DELETE FROM user_verification WHERE id=?;";
-                let mut stmt = tx.prepare(query)?;
-                stmt.execute(params![vr_id])?;
-            }
-            tx.commit()?;
-            info!("verification rescinded for {}", vr_str);
-            let ok: Result<()> = Ok(());
-            ok
-        })
-        .await?
-    }
 
     /// Persist an event, create a verification record, and broadcast.
     // TODO: have more event-writing logic handled in the db module.
     // Right now, these events avoid the rate limit. That is
@@ -513,11 +413,11 @@ impl Verifier {
         // disabled/passive, the event has already been persisted.
         let should_write_event = self.settings.verified_users.is_enabled();
         if should_write_event {
-            match db::write_event(&mut self.write_pool.get()?, event) {
+            match self.repo.write_event(event).await {
                 Ok(updated) => {
                     if updated != 0 {
                         info!(
-                            "persisted event: {:?} in {:?}",
+                            "persisted event (new verified pubkey): {:?} in {:?}",
                             event.get_event_id_prefix(),
                             start.elapsed()
                         );
@@ -533,7 +433,7 @@ impl Verifier {
             }
         }
         // write the verification record
-        save_verification_record(self.write_pool.get()?, event, name).await?;
+        self.repo.create_verification_record(&event.id, name).await?;
         Ok(())
     }
 }
@@ -563,7 +463,7 @@ pub struct VerificationRecord {
 
 /// Check with settings to determine if a given domain is allowed to
 /// publish.
-pub fn is_domain_allowed(
+#[must_use] pub fn is_domain_allowed(
     domain: &str,
     whitelist: &Option<Vec<String>>,
     blacklist: &Option<Vec<String>>,
@@ -583,7 +483,7 @@ pub fn is_domain_allowed(
 impl VerificationRecord {
     /// Check if the record is recent enough to be considered valid,
     /// and the domain is allowed.
-    pub fn is_valid(&self, verified_users_settings: &VerifiedUsers) -> bool {
+    #[must_use] pub fn is_valid(&self, verified_users_settings: &VerifiedUsers) -> bool {
         //let settings = SETTINGS.read().unwrap();
         // how long a verification record is good for
         let nip05_expiration = &verified_users_settings.verify_expiration_duration;
@@ -630,130 +530,6 @@ impl std::fmt::Display for VerificationRecord {
 
-/// Create a new verification record based on an event
-pub async fn save_verification_record(
-    mut conn: db::PooledConnection,
-    event: &Event,
-    name: &str,
-) -> Result<()> {
-    let e = hex::decode(&event.id).ok();
-    let n = name.to_owned();
-    let a_prefix = event.get_author_prefix();
-    tokio::task::spawn_blocking(move || {
-        let tx = conn.transaction()?;
-        {
-            // if we create a /new/ one, we should get rid of any old ones. or group the new ones by name and only consider the latest.
-            let query = "INSERT INTO user_verification (metadata_event, name, verified_at) VALUES ((SELECT id from event WHERE event_hash=?), ?, strftime('%s','now'));";
-            let mut stmt = tx.prepare(query)?;
-            stmt.execute(params![e, n])?;
-            // get the row ID
-            let v_id = tx.last_insert_rowid();
-            // delete everything else by this name
-            let del_query = "DELETE FROM user_verification WHERE name = ? AND id != ?;";
-            let mut del_stmt = tx.prepare(del_query)?;
-            let count = del_stmt.execute(params![n, v_id])?;
-            if count > 0 {
-                info!("removed {} old verification records for ({:?},{:?})", count, n, a_prefix);
-            }
-        }
-        tx.commit()?;
-        info!("saved new verification record for ({:?},{:?})", n, a_prefix);
-        let ok: Result<()> = Ok(());
-        ok
-    }).await?
-}
-
-/// Retrieve the most recent verification record for a given pubkey (async).
-pub async fn get_latest_user_verification(
-    conn: db::PooledConnection,
-    pubkey: &str,
-) -> Result<VerificationRecord> {
-    let p = pubkey.to_owned();
-    tokio::task::spawn_blocking(move || query_latest_user_verification(conn, p)).await?
-}
-
-/// Query database for the latest verification record for a given pubkey.
-pub fn query_latest_user_verification(
-    mut conn: db::PooledConnection,
-    pubkey: String,
-) -> Result<VerificationRecord> {
-    let tx = conn.transaction()?;
-    let query = "SELECT v.id, v.name, e.event_hash, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v LEFT JOIN event e ON e.id=v.metadata_event WHERE e.author=? ORDER BY e.created_at DESC, v.verified_at DESC, v.failed_at DESC LIMIT 1;";
-    let mut stmt = tx.prepare_cached(query)?;
-    let fields = stmt.query_row(params![hex::decode(&pubkey).ok()], |r| {
-        let rowid: u64 = r.get(0)?;
-        let rowname: String = r.get(1)?;
-        let eventid: Vec<u8> = r.get(2)?;
-        let created_at: u64 = r.get(3)?;
-        // create a tuple since we can't throw non-rusqlite errors in this closure
-        Ok((
-            rowid,
-            rowname,
-            eventid,
-            created_at,
-            r.get(4).ok(),
-            r.get(5).ok(),
-            r.get(6)?,
-        ))
-    })?;
-    Ok(VerificationRecord {
-        rowid: fields.0,
-        name: Nip05Name::try_from(&fields.1[..])?,
-        address: pubkey,
-        event: hex::encode(fields.2),
-        event_created: fields.3,
-        last_success: fields.4,
-        last_failure: fields.5,
-        failure_count: fields.6,
-    })
-}
-
-/// Retrieve the oldest user verification (async)
-pub async fn get_oldest_user_verification(
-    conn: db::PooledConnection,
-    earliest: u64,
-) -> Result<VerificationRecord> {
-    tokio::task::spawn_blocking(move || query_oldest_user_verification(conn, earliest)).await?
-}
-
-pub fn query_oldest_user_verification(
-    mut conn: db::PooledConnection,
-    earliest: u64,
-) -> Result<VerificationRecord> {
-    let tx = conn.transaction()?;
-    let query = "SELECT v.id, v.name, e.event_hash, e.author, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v LEFT JOIN event e ON e.id=v.metadata_event WHERE (v.verified_at < ? OR v.verified_at IS NULL) AND (v.failed_at < ? OR v.failed_at IS NULL) ORDER BY v.verified_at ASC, v.failed_at ASC LIMIT 1;";
-    let mut stmt = tx.prepare_cached(query)?;
-    let fields = stmt.query_row(params![earliest, earliest], |r| {
-        let rowid: u64 = r.get(0)?;
-        let rowname: String = r.get(1)?;
-        let eventid: Vec<u8> = r.get(2)?;
-        let pubkey: Vec<u8> = r.get(3)?;
-        let created_at: u64 = r.get(4)?;
-        // create a tuple since we can't throw non-rusqlite errors in this closure
-        Ok((
-            rowid,
-            rowname,
-            eventid,
-            pubkey,
-            created_at,
-            r.get(5).ok(),
-            r.get(6).ok(),
-            r.get(7)?,
-        ))
-    })?;
-    let vr = VerificationRecord {
-        rowid: fields.0,
-        name: Nip05Name::try_from(&fields.1[..])?,
-        address: hex::encode(fields.3),
-        event: hex::encode(fields.2),
-        event_created: fields.4,
-        last_success: fields.5,
-        last_failure: fields.6,
-        failure_count: fields.7,
-    };
-    Ok(vr)
-}
 
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -762,7 +538,7 @@ mod tests {
     fn local_from_inet() {
         let addr = "bob@example.com";
         let parsed = Nip05Name::try_from(addr);
-        assert!(!parsed.is_err());
+        assert!(parsed.is_ok());
         let v = parsed.unwrap();
         assert_eq!(v.local, "bob");
         assert_eq!(v.domain, "example.com");
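A quick illustration of the parsing rules enforced above; an underscore local part designates the whole domain:

fn main() {
    // valid local@domain pair
    assert!(Nip05Name::try_from("bob@example.com").is_ok());
    // too few / too many components
    assert!(Nip05Name::try_from("no-at-sign").is_err());
    assert!(Nip05Name::try_from("a@b@c").is_err());
    // "_" stands in for the whole domain
    assert!(Nip05Name::try_from("_@example.com").unwrap().is_domain_only());
}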
@@ -16,21 +16,18 @@ pub struct EventResult {
 pub enum Notice {
     Message(String),
     EventResult(EventResult),
+    AuthChallenge(String)
 }
 
 impl EventResultStatus {
-    pub fn to_bool(&self) -> bool {
+    #[must_use] pub fn to_bool(&self) -> bool {
         match self {
-            Self::Saved => true,
-            Self::Duplicate => true,
-            Self::Invalid => false,
-            Self::Blocked => false,
-            Self::RateLimited => false,
-            Self::Error => false,
+            Self::Duplicate | Self::Saved => true,
+            Self::Invalid | Self::Blocked | Self::RateLimited | Self::Error => false,
         }
     }
 
-    pub fn prefix(&self) -> &'static str {
+    #[must_use] pub fn prefix(&self) -> &'static str {
         match self {
             Self::Saved => "saved",
             Self::Duplicate => "duplicate",
@@ -47,7 +44,7 @@ impl Notice {
     // Notice::err_msg(format!("{}", err), id)
     //}
 
-    pub fn message(msg: String) -> Notice {
+    #[must_use] pub fn message(msg: String) -> Notice {
         Notice::Message(msg)
     }
@@ -56,27 +53,27 @@ impl Notice {
         Notice::EventResult(EventResult { id, msg, status })
     }
 
-    pub fn invalid(id: String, msg: &str) -> Notice {
+    #[must_use] pub fn invalid(id: String, msg: &str) -> Notice {
         Notice::prefixed(id, msg, EventResultStatus::Invalid)
     }
 
-    pub fn blocked(id: String, msg: &str) -> Notice {
+    #[must_use] pub fn blocked(id: String, msg: &str) -> Notice {
         Notice::prefixed(id, msg, EventResultStatus::Blocked)
     }
 
-    pub fn rate_limited(id: String, msg: &str) -> Notice {
+    #[must_use] pub fn rate_limited(id: String, msg: &str) -> Notice {
         Notice::prefixed(id, msg, EventResultStatus::RateLimited)
     }
 
-    pub fn duplicate(id: String) -> Notice {
+    #[must_use] pub fn duplicate(id: String) -> Notice {
         Notice::prefixed(id, "", EventResultStatus::Duplicate)
     }
 
-    pub fn error(id: String, msg: &str) -> Notice {
+    #[must_use] pub fn error(id: String, msg: &str) -> Notice {
        Notice::prefixed(id, msg, EventResultStatus::Error)
     }
 
-    pub fn saved(id: String) -> Notice {
+    #[must_use] pub fn saved(id: String) -> Notice {
         Notice::EventResult(EventResult {
             id,
             msg: "".into(),
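A sketch of how a connection handler might turn a Notice into a NIP-20 style wire reply; the exact formatting, and the assumption that EventResult's fields are public, are illustrative only:

// Hypothetical helper: ["OK", id, bool, "prefix: message"].
fn to_ok_message(notice: &Notice) -> Option<String> {
    match notice {
        Notice::EventResult(res) => Some(format!(
            r#"["OK","{}",{},"{}: {}"]"#,
            res.id,
            res.status.to_bool(),
            res.status.prefix(),
            res.msg
        )),
        // NOTICE and AUTH frames are built elsewhere
        _ => None,
    }
}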
src/repo/mod.rs  (new file, 69 lines)
@@ -0,0 +1,69 @@
use crate::db::QueryResult;
use crate::error::Result;
use crate::event::Event;
use crate::nip05::VerificationRecord;
use crate::subscription::Subscription;
use crate::utils::unix_time;
use async_trait::async_trait;
use rand::Rng;

pub mod sqlite;
pub mod sqlite_migration;
pub mod postgres;
pub mod postgres_migration;

#[async_trait]
pub trait NostrRepo: Send + Sync {
    /// Start the repository (any initialization or maintenance tasks can be kicked off here)
    async fn start(&self) -> Result<()>;

    /// Run migrations and return current version
    async fn migrate_up(&self) -> Result<usize>;

    /// Persist event to database
    async fn write_event(&self, e: &Event) -> Result<u64>;

    /// Perform a database query using a subscription.
    ///
    /// The [`Subscription`] is converted into a SQL query. Each result
    /// is published on the `query_tx` channel as it is returned. If a
    /// message becomes available on the `abandon_query_rx` channel, the
    /// query is immediately aborted.
    async fn query_subscription(
        &self,
        sub: Subscription,
        client_id: String,
        query_tx: tokio::sync::mpsc::Sender<QueryResult>,
        mut abandon_query_rx: tokio::sync::oneshot::Receiver<()>,
    ) -> Result<()>;

    /// Perform normal maintenance
    async fn optimize_db(&self) -> Result<()>;

    /// Create a new verification record connected to a specific event
    async fn create_verification_record(&self, event_id: &str, name: &str) -> Result<()>;

    /// Update verification timestamp
    async fn update_verification_timestamp(&self, id: u64) -> Result<()>;

    /// Update verification record as failed
    async fn fail_verification(&self, id: u64) -> Result<()>;

    /// Delete verification record
    async fn delete_verification(&self, id: u64) -> Result<()>;

    /// Get the latest verification record for a given pubkey.
    async fn get_latest_user_verification(&self, pub_key: &str) -> Result<VerificationRecord>;

    /// Get oldest verification before timestamp
    async fn get_oldest_user_verification(&self, before: u64) -> Result<VerificationRecord>;
}

// Current time, with a slight forward jitter in seconds
pub(crate) fn now_jitter(sec: u64) -> u64 {
    // random time between now, and 10min in future.
    let mut rng = rand::thread_rng();
    let jitter_amount = rng.gen_range(0..sec);
    let now = unix_time();
    now.saturating_add(jitter_amount)
}
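Because callers hold the repository behind this trait (typically as Arc<dyn NostrRepo>), the SQLite and Postgres backends are interchangeable. A minimal sketch of a caller that depends only on the trait methods declared above:

// Generic caller; works with either backend.
async fn persist_and_report(repo: &dyn NostrRepo, e: &Event) -> Result<()> {
    let rows = repo.write_event(e).await?;
    if rows == 0 {
        // duplicate, or shadowed by a newer replaceable event
        println!("event ignored");
    }
    Ok(())
}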
src/repo/postgres.rs  (new file, 786 lines)
@@ -0,0 +1,786 @@
use crate::db::QueryResult;
use crate::error::Result;
use crate::event::{single_char_tagname, Event};
use crate::nip05::{Nip05Name, VerificationRecord};
use crate::repo::{now_jitter, NostrRepo};
use crate::subscription::{ReqFilter, Subscription};
use async_std::stream::StreamExt;
use async_trait::async_trait;
use chrono::{DateTime, TimeZone, Utc};
use sqlx::postgres::PgRow;
use sqlx::{Error, Execute, FromRow, Postgres, QueryBuilder, Row};
use std::time::{Duration, Instant};
use sqlx::Error::RowNotFound;

use crate::hexrange::{hex_range, HexSearch};
use crate::repo::postgres_migration::run_migrations;
use crate::server::NostrMetrics;
use crate::utils::{is_hex, is_lower_hex, self};
use tokio::sync::mpsc::Sender;
use tokio::sync::oneshot::Receiver;
use tracing::log::trace;
use tracing::{debug, error, warn, info};
use crate::error;

pub type PostgresPool = sqlx::pool::Pool<Postgres>;

pub struct PostgresRepo {
    conn: PostgresPool,
    metrics: NostrMetrics,
}

impl PostgresRepo {
    pub fn new(c: PostgresPool, m: NostrMetrics) -> PostgresRepo {
        PostgresRepo {
            conn: c,
            metrics: m,
        }
    }
}

/// Cleanup expired events on a regular basis
async fn cleanup_expired(conn: PostgresPool, frequency: Duration) -> Result<()> {
    tokio::task::spawn(async move {
        loop {
            tokio::select! {
                _ = tokio::time::sleep(frequency) => {
                    let start = Instant::now();
                    let exp_res = delete_expired(conn.clone()).await;
                    match exp_res {
                        Ok(exp_count) => {
                            if exp_count > 0 {
                                info!("removed {} expired events in: {:?}", exp_count, start.elapsed());
                            }
                        },
                        Err(e) => {
                            warn!("could not remove expired events due to error: {:?}", e);
                        }
                    }
                }
            };
        }
    });
    Ok(())
}

/// One-time deletion of all expired events
async fn delete_expired(conn: PostgresPool) -> Result<u64> {
    let mut tx = conn.begin().await?;
    let update_count = sqlx::query("DELETE FROM \"event\" WHERE expires_at <= $1;")
        .bind(Utc.timestamp_opt(utils::unix_time() as i64, 0).unwrap())
        .execute(&mut tx)
        .await?
        .rows_affected();
    tx.commit().await?;
    Ok(update_count)
}
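// NOTE: cleanup_expired above follows a common shape: a detached tokio task
// that wakes on a fixed interval and runs one unit of work. The same
// pattern, reduced to a standalone helper (a sketch; names are illustrative
// and not part of this file):
fn spawn_periodic<F>(frequency: std::time::Duration, mut work: F)
where
    F: FnMut() + Send + 'static,
{
    // must be called from within a tokio runtime context
    tokio::task::spawn(async move {
        loop {
            tokio::time::sleep(frequency).await;
            work();
        }
    });
}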
#[async_trait]
impl NostrRepo for PostgresRepo {

    async fn start(&self) -> Result<()> {
        // begin a cleanup task for expired events.
        cleanup_expired(self.conn.clone(), Duration::from_secs(600)).await?;
        Ok(())
    }

    async fn migrate_up(&self) -> Result<usize> {
        Ok(run_migrations(&self.conn).await?)
    }

    async fn write_event(&self, e: &Event) -> Result<u64> {
        // start transaction
        let mut tx = self.conn.begin().await?;
        let start = Instant::now();

        // get relevant fields from event and convert to blobs.
        let id_blob = hex::decode(&e.id).ok();
        let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok();
        let delegator_blob: Option<Vec<u8>> =
            e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
        let event_str = serde_json::to_string(&e).unwrap();

        // determine if this event would be shadowed by an existing
        // replaceable event or parameterized replaceable event.
        if e.is_replaceable() {
            let repl_count = sqlx::query(
                "SELECT e.id FROM event e WHERE e.pub_key=$1 AND e.kind=$2 AND e.created_at >= $3 LIMIT 1;")
                .bind(&pubkey_blob)
                .bind(e.kind as i64)
                .bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
                .fetch_optional(&mut tx)
                .await?;
            if repl_count.is_some() {
                return Ok(0);
            }
        }
        if let Some(d_tag) = e.distinct_param() {
            let repl_count: i64 = if is_lower_hex(&d_tag) && (d_tag.len() % 2 == 0) {
                sqlx::query_scalar(
                    "SELECT count(*) AS count FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.pub_key=$1 AND e.kind=$2 AND t.name='d' AND t.value_hex=$3 AND e.created_at >= $4 LIMIT 1;")
                    .bind(hex::decode(&e.pubkey).ok())
                    .bind(e.kind as i64)
                    .bind(hex::decode(d_tag).ok())
                    .bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
                    .fetch_one(&mut tx)
                    .await?
            } else {
                sqlx::query_scalar(
                    "SELECT count(*) AS count FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.pub_key=$1 AND e.kind=$2 AND t.name='d' AND t.value=$3 AND e.created_at >= $4 LIMIT 1;")
                    .bind(hex::decode(&e.pubkey).ok())
                    .bind(e.kind as i64)
                    .bind(d_tag.as_bytes())
                    .bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
                    .fetch_one(&mut tx)
                    .await?
            };
            // if any rows were returned, then some newer event with
            // the same author/kind/tag value exists, and we can ignore
            // this event.
            if repl_count > 0 {
                return Ok(0)
            }
        }
        // ignore if the event hash is a duplicate.
        let mut ins_count = sqlx::query(
            r#"INSERT INTO "event"
(id, pub_key, created_at, expires_at, kind, "content", delegated_by)
VALUES($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (id) DO NOTHING"#,
        )
        .bind(&id_blob)
        .bind(&pubkey_blob)
        .bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
        .bind(e.expiration().and_then(|x| Utc.timestamp_opt(x as i64, 0).latest()))
        .bind(e.kind as i64)
        .bind(event_str.into_bytes())
        .bind(delegator_blob)
        .execute(&mut tx)
        .await?
        .rows_affected();

        if ins_count == 0 {
            // if the event was a duplicate, no need to insert event or
            // pubkey references. This will abort the txn.
            return Ok(0);
        }

        // add all tags to the tag table
        for tag in e.tags.iter() {
            // ensure we have 2 values.
            if tag.len() >= 2 {
                let tag_name = &tag[0];
                let tag_val = &tag[1];
                // only single-char tags are searchable
                let tag_char_opt = single_char_tagname(tag_name);
                match &tag_char_opt {
                    Some(_) => {
                        // if the tag value is lowercase hex, store it as a binary blob
                        if is_lower_hex(tag_val) && (tag_val.len() % 2 == 0) {
                            sqlx::query("INSERT INTO tag (event_id, \"name\", value, value_hex) VALUES($1, $2, NULL, $3) \
                                ON CONFLICT (event_id, \"name\", value, value_hex) DO NOTHING")
                                .bind(&id_blob)
                                .bind(tag_name)
                                .bind(hex::decode(tag_val).ok())
                                .execute(&mut tx)
                                .await?;
                        } else {
                            sqlx::query("INSERT INTO tag (event_id, \"name\", value, value_hex) VALUES($1, $2, $3, NULL) \
                                ON CONFLICT (event_id, \"name\", value, value_hex) DO NOTHING")
                                .bind(&id_blob)
                                .bind(tag_name)
                                .bind(tag_val.as_bytes())
                                .execute(&mut tx)
                                .await?;
                        }
                    }
                    None => {}
                }
            }
        }
        if e.is_replaceable() {
            let update_count = sqlx::query("DELETE FROM \"event\" WHERE kind=$1 and pub_key = $2 and id not in (select id from \"event\" where kind=$1 and pub_key=$2 order by created_at desc limit 1);")
                .bind(e.kind as i64)
                .bind(hex::decode(&e.pubkey).ok())
                .execute(&mut tx)
                .await?
                .rows_affected();
            if update_count > 0 {
                info!(
                    "hid {} older replaceable kind {} events for author: {:?}",
                    update_count,
                    e.kind,
                    e.get_author_prefix()
                );
            }
        }
        // parameterized replaceable events
        // check for parameterized replaceable events that would be hidden; don't insert these either.
        if let Some(d_tag) = e.distinct_param() {
            let update_count = if is_lower_hex(&d_tag) && (d_tag.len() % 2 == 0) {
                sqlx::query("DELETE FROM event WHERE kind=$1 AND pub_key=$2 AND id IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=$1 AND e.pub_key=$2 AND t.name='d' AND t.value_hex=$3 ORDER BY created_at DESC OFFSET 1);")
                    .bind(e.kind as i64)
                    .bind(hex::decode(&e.pubkey).ok())
                    .bind(hex::decode(d_tag).ok())
                    .execute(&mut tx)
                    .await?
                    .rows_affected()
            } else {
                sqlx::query("DELETE FROM event WHERE kind=$1 AND pub_key=$2 AND id IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=$1 AND e.pub_key=$2 AND t.name='d' AND t.value=$3 ORDER BY created_at DESC OFFSET 1);")
                    .bind(e.kind as i64)
                    .bind(hex::decode(&e.pubkey).ok())
                    .bind(d_tag.as_bytes())
                    .execute(&mut tx)
                    .await?
                    .rows_affected()
            };
            if update_count > 0 {
                info!(
                    "removed {} older parameterized replaceable kind {} events for author: {:?}",
                    update_count,
                    e.kind,
                    e.get_author_prefix()
                );
            }
        }
        // if this event is a deletion, hide the referenced events from the same author.
        if e.kind == 5 {
            let event_candidates = e.tag_values_by_name("e");
            let pub_keys: Vec<Vec<u8>> = event_candidates
                .iter()
                .filter(|x| is_hex(x) && x.len() == 64)
                .filter_map(|x| hex::decode(x).ok())
                .collect();

            let mut builder = QueryBuilder::new(
                "UPDATE \"event\" SET hidden = 1::bit(1) WHERE kind != 5 AND pub_key = ",
            );
            builder.push_bind(hex::decode(&e.pubkey).ok());
            builder.push(" AND id IN (");

            let mut sep = builder.separated(", ");
            for pk in pub_keys {
                sep.push_bind(pk);
            }
            sep.push_unseparated(")");

            let update_count = builder.build().execute(&mut tx).await?.rows_affected();
            info!(
                "hid {} deleted events for author {:?}",
                update_count,
                e.get_author_prefix()
            );
        } else {
            // check if a deletion has already been recorded for this event.
            // Only relevant for non-deletion events
            let del_count = sqlx::query(
                "SELECT e.id FROM \"event\" e \
                LEFT JOIN tag t ON e.id = t.event_id \
                WHERE e.pub_key = $1 AND t.\"name\" = 'e' AND e.kind = 5 AND t.value = $2 LIMIT 1",
            )
            .bind(&pubkey_blob)
            .bind(&id_blob)
            .fetch_optional(&mut tx)
            .await?;

            // check if the query returned a result, meaning we should
            // hide the current event
            if del_count.is_some() {
                // a deletion already existed, mark original event as hidden.
                info!(
                    "hid event: {:?} due to existing deletion by author: {:?}",
                    e.get_event_id_prefix(),
                    e.get_author_prefix()
                );
                sqlx::query("UPDATE \"event\" SET hidden = 1::bit(1) WHERE id = $1")
                    .bind(&id_blob)
                    .execute(&mut tx)
                    .await?;
                // event was deleted, so let caller know nothing new
                // arrived, preventing this from being sent to active
                // subscriptions
                ins_count = 0;
            }
        }
        tx.commit().await?;
        self.metrics
            .write_events
            .observe(start.elapsed().as_secs_f64());
        Ok(ins_count)
    }
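    // NOTE: write_event returns the count of newly stored events; 0 means
    // the event was a duplicate (ON CONFLICT DO NOTHING above), shadowed by
    // a newer replaceable event, or already covered by a deletion, so
    // callers skip re-broadcasting it. A hypothetical caller-side sketch:
    //
    //     async fn store_and_broadcast(
    //         repo: &dyn NostrRepo,
    //         tx: &tokio::sync::broadcast::Sender<Event>,
    //         e: Event,
    //     ) -> Result<()> {
    //         if repo.write_event(&e).await? > 0 {
    //             tx.send(e).ok(); // ignore "no receivers" errors
    //         }
    //         Ok(())
    //     }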
    async fn query_subscription(
        &self,
        sub: Subscription,
        client_id: String,
        query_tx: Sender<QueryResult>,
        mut abandon_query_rx: Receiver<()>,
    ) -> Result<()> {
        let start = Instant::now();
        let mut row_count: usize = 0;
        let metrics = &self.metrics;

        for filter in sub.filters.iter() {
            let start = Instant::now();
            // generate SQL query
            let q_filter = query_from_filter(filter);
            if q_filter.is_none() {
                debug!("Failed to generate query!");
                continue;
            }

            debug!("SQL generated in {:?}", start.elapsed());

            // cutoff for displaying slow queries
            let slow_cutoff = Duration::from_millis(2000);

            // any client that doesn't cause us to generate new rows in 5
            // seconds gets dropped.
            let abort_cutoff = Duration::from_secs(5);

            let start = Instant::now();
            let mut slow_first_event;
            let mut last_successful_send = Instant::now();

            // execute the query. Don't cache, since queries vary so much.
            let mut q_filter = q_filter.unwrap();
            let q_build = q_filter.build();
            let sql = q_build.sql();
            let mut results = q_build.fetch(&self.conn);

            let mut first_result = true;
            while let Some(row) = results.next().await {
                if let Err(e) = row {
                    error!("Query failed: {} {} {:?}", e, sql, filter);
                    break;
                }
                let first_event_elapsed = start.elapsed();
                slow_first_event = first_event_elapsed >= slow_cutoff;
                if first_result {
                    debug!(
                        "first result in {:?} (cid: {}, sub: {:?})",
                        first_event_elapsed, client_id, sub.id
                    );
                    first_result = false;
                }

                // logging for slow queries; show sub and SQL.
                // to reduce logging; only show 1/16th of clients (leading 0)
                if slow_first_event && client_id.starts_with("00") {
                    debug!(
                        "query req (slow): {:?} (cid: {}, sub: {:?})",
                        &sub, client_id, sub.id
                    );
                } else {
                    trace!(
                        "query req: {:?} (cid: {}, sub: {:?})",
                        &sub,
                        client_id,
                        sub.id
                    );
                }

                // check if this is still active; every 100 rows
                if row_count % 100 == 0 && abandon_query_rx.try_recv().is_ok() {
                    debug!("query cancelled by client (cid: {}, sub: {:?})", client_id, sub.id);
                    return Ok(());
                }

                row_count += 1;
                let event_json: Vec<u8> = row.unwrap().get(0);
                loop {
                    if query_tx.capacity() != 0 {
                        // we have capacity to add another item
                        break;
                    } else {
                        // the queue is full
                        trace!("db reader thread is stalled");
                        if last_successful_send + abort_cutoff < Instant::now() {
                            // the queue has been full for too long, abort
                            info!("aborting database query due to slow client");
                            metrics.query_aborts.with_label_values(&["slowclient"]).inc();
                            return Ok(());
                        }
                        // give the queue a chance to clear before trying again
                        async_std::task::sleep(Duration::from_millis(100)).await;
                    }
                }

                // TODO: we could use try_send, but we'd have to juggle
                // getting the query result back as part of the error
                // result.
                query_tx
                    .send(QueryResult {
                        sub_id: sub.get_id(),
                        event: String::from_utf8(event_json).unwrap(),
                    })
                    .await
                    .ok();
                last_successful_send = Instant::now();
            }
        }
        query_tx
            .send(QueryResult {
                sub_id: sub.get_id(),
                event: "EOSE".to_string(),
            })
            .await
            .ok();
        self.metrics
            .query_sub
            .observe(start.elapsed().as_secs_f64());
        debug!(
            "query completed in {:?} (cid: {}, sub: {:?}, db_time: {:?}, rows: {})",
            start.elapsed(),
            client_id,
            sub.id,
            start.elapsed(),
            row_count
        );
        Ok(())
    }
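    // NOTE: the capacity() polling above implements simple backpressure: a
    // consumer that stays stalled past abort_cutoff gets its query aborted
    // instead of buffering unboundedly. The same rule as a standalone
    // helper (a sketch, not part of this file):
    //
    //     async fn wait_for_capacity<T>(
    //         tx: &tokio::sync::mpsc::Sender<T>,
    //         last_send: std::time::Instant,
    //         abort_cutoff: std::time::Duration,
    //     ) -> bool {
    //         while tx.capacity() == 0 {
    //             if last_send + abort_cutoff < std::time::Instant::now() {
    //                 return false; // stalled too long; caller aborts
    //             }
    //             tokio::time::sleep(std::time::Duration::from_millis(100)).await;
    //         }
    //         true
    //     }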
||||
|
||||
    async fn optimize_db(&self) -> Result<()> {
        // Not implemented
        Ok(())
    }

    async fn create_verification_record(&self, event_id: &str, name: &str) -> Result<()> {
        let mut tx = self.conn.begin().await?;

        sqlx::query("DELETE FROM user_verification WHERE \"name\" = $1")
            .bind(name)
            .execute(&mut tx)
            .await?;

        sqlx::query("INSERT INTO user_verification (event_id, \"name\", verified_at) VALUES ($1, $2, now())")
            .bind(hex::decode(event_id).ok())
            .bind(name)
            .execute(&mut tx)
            .await?;

        tx.commit().await?;
        info!("saved new verification record for ({:?})", name);
        Ok(())
    }

    async fn update_verification_timestamp(&self, id: u64) -> Result<()> {
        // add some jitter to the verification to prevent everything from stacking up together.
        let verify_time = now_jitter(600);

        // update verification time and reset any failure count
        sqlx::query(
            "UPDATE user_verification SET verified_at = $1, fail_count = 0 WHERE id = $2",
        )
        .bind(Utc.timestamp_opt(verify_time as i64, 0).unwrap())
        .bind(id as i64)
        .execute(&self.conn)
        .await?;

        info!("verification updated for {}", id);
        Ok(())
    }

    async fn fail_verification(&self, id: u64) -> Result<()> {
        sqlx::query("UPDATE user_verification SET failed_at = now(), fail_count = fail_count + 1 WHERE id = $1")
            .bind(id as i64)
            .execute(&self.conn)
            .await?;
        Ok(())
    }

    async fn delete_verification(&self, id: u64) -> Result<()> {
        sqlx::query("DELETE FROM user_verification WHERE id = $1")
            .bind(id as i64)
            .execute(&self.conn)
            .await?;
        Ok(())
    }

    async fn get_latest_user_verification(&self, pub_key: &str) -> Result<VerificationRecord> {
        let query = r#"SELECT
            v.id,
            v."name",
            e.id as event_id,
            e.pub_key,
            e.created_at,
            v.verified_at,
            v.failed_at,
            v.fail_count
        FROM user_verification v
        LEFT JOIN "event" e ON e.id = v.event_id
        WHERE e.pub_key = $1
        ORDER BY e.created_at DESC, v.verified_at DESC, v.failed_at DESC
        LIMIT 1"#;
        sqlx::query_as::<_, VerificationRecord>(query)
            .bind(hex::decode(pub_key).ok())
            .fetch_optional(&self.conn)
            .await?
            .ok_or(error::Error::SqlxError(RowNotFound))
    }

    async fn get_oldest_user_verification(&self, before: u64) -> Result<VerificationRecord> {
        let query = r#"SELECT
            v.id,
            v."name",
            e.id as event_id,
            e.pub_key,
            e.created_at,
            v.verified_at,
            v.failed_at,
            v.fail_count
        FROM user_verification v
        LEFT JOIN "event" e ON e.id = v.event_id
        WHERE (v.verified_at < $1 OR v.verified_at IS NULL)
            AND (v.failed_at < $1 OR v.failed_at IS NULL)
        ORDER BY v.verified_at ASC, v.failed_at ASC
        LIMIT 1"#;
        sqlx::query_as::<_, VerificationRecord>(query)
            .bind(Utc.timestamp_opt(before as i64, 0).unwrap())
            .fetch_optional(&self.conn)
            .await?
            .ok_or(error::Error::SqlxError(RowNotFound))
    }
}

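[note] `now_jitter(600)` above spreads re-verification times so periodic checks don't all fire at once; the helper lives in src/utils.rs and is not part of this diff. A plausible sketch of what it does (the real implementation may differ):

use rand::Rng;
use std::time::{SystemTime, UNIX_EPOCH};

/// Current unix time plus a random 0..=jitter seconds.
fn now_jitter(jitter: u64) -> u64 {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_secs())
        .unwrap_or(0);
    now + rand::thread_rng().gen_range(0..=jitter)
}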
/// Create a dynamic SQL query and params from a subscription filter.
fn query_from_filter(f: &ReqFilter) -> Option<QueryBuilder<Postgres>> {
    // if the filter is malformed, don't return anything.
    if f.force_no_match {
        return None;
    }

    let mut query = QueryBuilder::new("SELECT e.\"content\", e.created_at FROM \"event\" e WHERE ");

    // This tracks whether we need to push a prefix AND before adding another clause
    let mut push_and = false;
    // Query for "authors", allowing prefix matches
    if let Some(auth_vec) = &f.authors {
        // filter out non-hex values
        let auth_vec: Vec<&String> = auth_vec.iter().filter(|a| is_hex(a)).collect();

        if !auth_vec.is_empty() {
            query.push("(");

            // shortcut authors into "IN" query
            let any_is_range = auth_vec.iter().any(|pk| pk.len() != 64);
            if !any_is_range {
                query.push("e.pub_key in (");
                let mut pk_sep = query.separated(", ");
                for pk in auth_vec.iter() {
                    pk_sep.push_bind(hex::decode(pk).ok());
                }
                query.push(") OR e.delegated_by in (");
                let mut pk_delegated_sep = query.separated(", ");
                for pk in auth_vec.iter() {
                    pk_delegated_sep.push_bind(hex::decode(pk).ok());
                }
                query.push(")");
                push_and = true;
            } else {
                let mut range_authors = query.separated(" OR ");
                for auth in auth_vec {
                    match hex_range(auth) {
                        Some(HexSearch::Exact(ex)) => {
                            range_authors
                                .push("(e.pub_key = ")
                                .push_bind_unseparated(ex.clone())
                                .push_unseparated(" OR e.delegated_by = ")
                                .push_bind_unseparated(ex)
                                .push_unseparated(")");
                        }
                        Some(HexSearch::Range(lower, upper)) => {
                            range_authors
                                .push("((e.pub_key > ")
                                .push_bind_unseparated(lower.clone())
                                .push_unseparated(" AND e.pub_key < ")
                                .push_bind_unseparated(upper.clone())
                                .push_unseparated(") OR (e.delegated_by > ")
                                .push_bind_unseparated(lower)
                                .push_unseparated(" AND e.delegated_by < ")
                                .push_bind_unseparated(upper)
                                .push_unseparated("))");
                        }
                        Some(HexSearch::LowerOnly(lower)) => {
                            range_authors
                                .push("(e.pub_key > ")
                                .push_bind_unseparated(lower.clone())
                                .push_unseparated(" OR e.delegated_by > ")
                                .push_bind_unseparated(lower)
                                .push_unseparated(")");
                        }
                        None => {
                            info!("Could not parse hex range from author {:?}", auth);
                        }
                    }
                    push_and = true;
                }
            }
            query.push(")");
        }
    }

    // Query for Kind
    if let Some(ks) = &f.kinds {
        if !ks.is_empty() {
            if push_and {
                query.push(" AND ");
            }
            push_and = true;

            query.push("e.kind in (");
            let mut list_query = query.separated(", ");
            for k in ks.iter() {
                list_query.push_bind(*k as i64);
            }
            query.push(")");
        }
    }

    // Query for event ids, allowing prefix matches
    if let Some(id_vec) = &f.ids {
        // filter out non-hex values
        let id_vec: Vec<&String> = id_vec.iter().filter(|a| is_hex(a)).collect();

        if !id_vec.is_empty() {
            if push_and {
                query.push(" AND (");
            } else {
                query.push("(");
            }
            push_and = true;

            // shortcut ids into "IN" query
            let any_is_range = id_vec.iter().any(|pk| pk.len() != 64);
            if !any_is_range {
                query.push("id in (");
                let mut sep = query.separated(", ");
                for id in id_vec.iter() {
                    sep.push_bind(hex::decode(id).ok());
                }
                query.push(")");
            } else {
                // take each id and convert to a hex search
                let mut id_query = query.separated(" OR ");
                for id in id_vec {
                    match hex_range(id) {
                        Some(HexSearch::Exact(ex)) => {
                            id_query
                                .push("(id = ")
                                .push_bind_unseparated(ex)
                                .push_unseparated(")");
                        }
                        Some(HexSearch::Range(lower, upper)) => {
                            id_query
                                .push("(id > ")
                                .push_bind_unseparated(lower)
                                .push_unseparated(" AND id < ")
                                .push_bind_unseparated(upper)
                                .push_unseparated(")");
                        }
                        Some(HexSearch::LowerOnly(lower)) => {
                            id_query
                                .push("(id > ")
                                .push_bind_unseparated(lower)
                                .push_unseparated(")");
                        }
                        None => {
                            info!("Could not parse hex range from id {:?}", id);
                        }
                    }
                }
            }

            query.push(")");
        }
    }

    // Query for tags
    if let Some(map) = &f.tags {
        if !map.is_empty() {
            if push_and {
                query.push(" AND ");
            }
            push_and = true;

            for (key, val) in map.iter() {
                query.push("e.id IN (SELECT ee.id FROM \"event\" ee LEFT JOIN tag t on ee.id = t.event_id WHERE ee.hidden != 1::bit(1) and (t.\"name\" = ")
                    .push_bind(key.to_string())
                    .push(" AND (value in (");

                // plain value match first
                let mut tag_query = query.separated(", ");
                for v in val.iter() {
                    if (v.len() % 2 != 0) && !is_lower_hex(v) {
                        tag_query.push_bind(v.as_bytes());
                    } else {
                        tag_query.push_bind(hex::decode(v).ok());
                    }
                }
                query.push("))))");
            }
        }
    }

    // Query for timestamp (since)
    if f.since.is_some() {
        if push_and {
            query.push(" AND ");
        }
        push_and = true;
        query
            .push("e.created_at > ")
            .push_bind(Utc.timestamp_opt(f.since.unwrap() as i64, 0).unwrap());
    }

    // Query for timestamp (until)
    if f.until.is_some() {
        if push_and {
            query.push(" AND ");
        }
        push_and = true;
        query
            .push("e.created_at < ")
            .push_bind(Utc.timestamp_opt(f.until.unwrap() as i64, 0).unwrap());
    }

    // never display hidden events
    if push_and {
        query.push(" AND e.hidden != 1::bit(1)");
    } else {
        query.push("e.hidden != 1::bit(1)");
    }
    // never display expired events
    query
        .push(" AND (e.expires_at IS NULL OR e.expires_at > ")
        .push_bind(Utc.timestamp_opt(utils::unix_time() as i64, 0).unwrap())
        .push(")");

    // Apply per-filter limit to this query.
    // The use of a LIMIT implies a DESC order, to capture only the most recent events.
    if let Some(lim) = f.limit {
        query.push(" ORDER BY e.created_at DESC LIMIT ");
        query.push(lim.min(1000));
    } else {
        query.push(" ORDER BY e.created_at ASC LIMIT ");
        query.push(1000);
    }
    Some(query)
}

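[note] For orientation, a sketch of driving `query_from_filter` with a simple filter. The `ReqFilter` field names are inferred from their use above (the real struct may carry more fields), and the SQL in the comment is approximate, not output captured from this code:

fn example_filter_sql() {
    // hypothetical filter: kind-1 notes since a timestamp, newest 10
    let filter = ReqFilter {
        ids: None,
        authors: None,
        kinds: Some(vec![1]),
        since: Some(1_600_000_000),
        until: None,
        tags: None,
        limit: Some(10),
        force_no_match: false,
    };
    if let Some(mut q) = query_from_filter(&filter) {
        // Roughly: SELECT e."content", e.created_at FROM "event" e
        //   WHERE e.kind in ($1) AND e.created_at > $2
        //   AND e.hidden != 1::bit(1)
        //   AND (e.expires_at IS NULL OR e.expires_at > $3)
        //   ORDER BY e.created_at DESC LIMIT 10
        println!("{}", q.build().sql());
    }
}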
impl FromRow<'_, PgRow> for VerificationRecord {
    fn from_row(row: &'_ PgRow) -> std::result::Result<Self, Error> {
        let name =
            Nip05Name::try_from(row.get::<'_, &str, &str>("name")).or(Err(RowNotFound))?;
        Ok(VerificationRecord {
            rowid: row.get::<'_, i64, &str>("id") as u64,
            name,
            address: hex::encode(row.get::<'_, Vec<u8>, &str>("pub_key")),
            event: hex::encode(row.get::<'_, Vec<u8>, &str>("event_id")),
            event_created: row.get::<'_, DateTime<Utc>, &str>("created_at").timestamp() as u64,
            last_success: None,
            last_failure: match row.try_get::<'_, DateTime<Utc>, &str>("failed_at") {
                Ok(x) => Some(x.timestamp() as u64),
                _ => None,
            },
            failure_count: row.get::<'_, i32, &str>("fail_count") as u64,
        })
    }
}
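[note] One detail worth flagging in `from_row`: `get` panics if a non-Option column decodes as NULL, so the nullable `failed_at` column goes through `try_get` and maps the error to `None`. The same effect can be had by decoding directly into an Option (sketch; names illustrative):

let last_failure: Option<u64> = row
    .try_get::<Option<DateTime<Utc>>, _>("failed_at")
    .ok()
    .flatten()
    .map(|t| t.timestamp() as u64);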
src/repo/postgres_migration.rs (new file, 279 lines)
@@ -0,0 +1,279 @@
use crate::repo::postgres::PostgresPool;
use async_trait::async_trait;
use sqlx::{Executor, Postgres, Transaction};

#[async_trait]
pub trait Migration {
    fn serial_number(&self) -> i64;
    async fn run(&self, tx: &mut Transaction<Postgres>);
}

struct SimpleSqlMigration {
    pub serial_number: i64,
    pub sql: Vec<&'static str>,
}

#[async_trait]
impl Migration for SimpleSqlMigration {
    fn serial_number(&self) -> i64 {
        self.serial_number
    }

    async fn run(&self, tx: &mut Transaction<Postgres>) {
        for sql in self.sql.iter() {
            tx.execute(*sql).await.unwrap();
        }
    }
}

/// Execute all migrations on the database.
pub async fn run_migrations(db: &PostgresPool) -> crate::error::Result<usize> {
    prepare_migrations_table(db).await;
    run_migration(m001::migration(), db).await;
    let m002_result = run_migration(m002::migration(), db).await;
    if m002_result == MigrationResult::Upgraded {
        m002::rebuild_tags(db).await?;
    }
    run_migration(m003::migration(), db).await;
    run_migration(m004::migration(), db).await;
    Ok(current_version(db).await as usize)
}

async fn current_version(db: &PostgresPool) -> i64 {
    sqlx::query_scalar("SELECT max(serial_number) FROM migrations;")
        .fetch_one(db)
        .await
        .unwrap()
}

async fn prepare_migrations_table(db: &PostgresPool) {
    sqlx::query("CREATE TABLE IF NOT EXISTS migrations (serial_number bigint)")
        .execute(db)
        .await
        .unwrap();
}

// Running a migration was either unnecessary, or completed
#[derive(PartialEq, Eq, Debug, Clone)]
enum MigrationResult {
    Upgraded,
    NotNeeded,
}

async fn run_migration(migration: impl Migration, db: &PostgresPool) -> MigrationResult {
    let row: i64 =
        sqlx::query_scalar("SELECT COUNT(*) AS count FROM migrations WHERE serial_number = $1")
            .bind(migration.serial_number())
            .fetch_one(db)
            .await
            .unwrap();

    if row > 0 {
        return MigrationResult::NotNeeded;
    }

    let mut transaction = db.begin().await.unwrap();
    migration.run(&mut transaction).await;

    sqlx::query("INSERT INTO migrations VALUES ($1)")
        .bind(migration.serial_number())
        .execute(&mut transaction)
        .await
        .unwrap();

    transaction.commit().await.unwrap();
    MigrationResult::Upgraded
}

mod m001 {
    use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};

    pub const VERSION: i64 = 1;

    pub fn migration() -> impl Migration {
        SimpleSqlMigration {
            serial_number: VERSION,
            sql: vec![
                r#"
-- Events table
CREATE TABLE "event" (
    id bytea NOT NULL,
    pub_key bytea NOT NULL,
    created_at timestamp with time zone NOT NULL,
    kind integer NOT NULL,
    "content" bytea NOT NULL,
    hidden bit(1) NOT NULL DEFAULT 0::bit(1),
    delegated_by bytea NULL,
    first_seen timestamp with time zone NOT NULL DEFAULT now(),
    CONSTRAINT event_pkey PRIMARY KEY (id)
);
CREATE INDEX event_created_at_idx ON "event" (created_at,kind);
CREATE INDEX event_pub_key_idx ON "event" (pub_key);
CREATE INDEX event_delegated_by_idx ON "event" (delegated_by);

-- Tags table
CREATE TABLE "tag" (
    id int8 NOT NULL GENERATED BY DEFAULT AS IDENTITY,
    event_id bytea NOT NULL,
    "name" varchar NOT NULL,
    value bytea NOT NULL,
    CONSTRAINT tag_fk FOREIGN KEY (event_id) REFERENCES "event"(id) ON DELETE CASCADE
);
CREATE INDEX tag_event_id_idx ON tag USING btree (event_id, name);
CREATE INDEX tag_value_idx ON tag USING btree (value);

-- NIP-05 Verification table
CREATE TABLE "user_verification" (
    id int8 NOT NULL GENERATED BY DEFAULT AS IDENTITY,
    event_id bytea NOT NULL,
    "name" varchar NOT NULL,
    verified_at timestamptz NULL,
    failed_at timestamptz NULL,
    fail_count int4 NULL DEFAULT 0,
    CONSTRAINT user_verification_pk PRIMARY KEY (id),
    CONSTRAINT user_verification_fk FOREIGN KEY (event_id) REFERENCES "event"(id) ON DELETE CASCADE
);
CREATE INDEX user_verification_event_id_idx ON user_verification USING btree (event_id);
CREATE INDEX user_verification_name_idx ON user_verification USING btree (name);
"#,
            ],
        }
    }
}

mod m002 {
    use async_std::stream::StreamExt;
    use indicatif::{ProgressBar, ProgressStyle};
    use sqlx::Row;
    use std::time::Instant;
    use tracing::info;

    use crate::event::{single_char_tagname, Event};
    use crate::repo::postgres::PostgresPool;
    use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
    use crate::utils::is_lower_hex;

    pub const VERSION: i64 = 2;

    pub fn migration() -> impl Migration {
        SimpleSqlMigration {
            serial_number: VERSION,
            sql: vec![
                r#"
-- Add tag value column
ALTER TABLE tag ADD COLUMN value_hex bytea;
-- Remove not-null constraint
ALTER TABLE tag ALTER COLUMN value DROP NOT NULL;
-- Add value index
CREATE INDEX tag_value_hex_idx ON tag USING btree (value_hex);
"#,
            ],
        }
    }

    pub async fn rebuild_tags(db: &PostgresPool) -> crate::error::Result<()> {
        // Check how many events we have to process
        let start = Instant::now();
        let mut tx = db.begin().await.unwrap();
        let mut update_tx = db.begin().await.unwrap();
        // Clear out table
        sqlx::query("DELETE FROM tag;")
            .execute(&mut update_tx)
            .await?;
        {
            let event_count: i64 = sqlx::query_scalar("SELECT COUNT(*) from event;")
                .fetch_one(&mut tx)
                .await
                .unwrap();
            let bar = ProgressBar::new(event_count.try_into().unwrap())
                .with_message("rebuilding tags table");
            bar.set_style(
                ProgressStyle::with_template(
                    "[{elapsed_precise}] {bar:40.white/blue} {pos:>7}/{len:7} [{percent}%] {msg}",
                )
                .unwrap(),
            );
            let mut events =
                sqlx::query("SELECT id, content FROM event ORDER BY id;").fetch(&mut tx);
            while let Some(row) = events.next().await {
                bar.inc(1);
                // get the row id and content
                let row = row.unwrap();
                let event_id: Vec<u8> = row.get(0);
                let event_bytes: Vec<u8> = row.get(1);
                let event: Event = serde_json::from_str(&String::from_utf8(event_bytes).unwrap())?;

                for t in event.tags.iter().filter(|x| x.len() > 1) {
                    let tagname = t.get(0).unwrap();
                    let tagnamechar_opt = single_char_tagname(tagname);
                    if tagnamechar_opt.is_none() {
                        continue;
                    }
                    // safe because len was > 1
                    let tagval = t.get(1).unwrap();
                    // insert as BLOB if we can restore it losslessly.
                    // this means it needs to be even length and lowercase.
                    if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
                        let q = "INSERT INTO tag (event_id, \"name\", value, value_hex) VALUES ($1, $2, NULL, $3) ON CONFLICT DO NOTHING;";
                        sqlx::query(q)
                            .bind(&event_id)
                            .bind(tagname)
                            .bind(hex::decode(tagval).ok())
                            .execute(&mut update_tx)
                            .await?;
                    } else {
                        let q = "INSERT INTO tag (event_id, \"name\", value, value_hex) VALUES ($1, $2, $3, NULL) ON CONFLICT DO NOTHING;";
                        sqlx::query(q)
                            .bind(&event_id)
                            .bind(tagname)
                            .bind(tagval.as_bytes())
                            .execute(&mut update_tx)
                            .await?;
                    }
                }
            }
            update_tx.commit().await?;
            bar.finish();
        }
        info!("rebuilt tags in {:?}", start.elapsed());
        Ok(())
    }
}

mod m003 {
    use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};

    pub const VERSION: i64 = 3;

    pub fn migration() -> impl Migration {
        SimpleSqlMigration {
            serial_number: VERSION,
            sql: vec![
                r#"
-- Add unique constraint on tag
ALTER TABLE tag ADD CONSTRAINT unique_constraint_name UNIQUE (event_id, "name", value, value_hex);
"#,
            ],
        }
    }
}

mod m004 {
    use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};

    pub const VERSION: i64 = 4;

    pub fn migration() -> impl Migration {
        SimpleSqlMigration {
            serial_number: VERSION,
            sql: vec![
                r#"
-- Add expiration time for events
ALTER TABLE event ADD COLUMN expires_at timestamp(0) with time zone;
-- Index expiration time
CREATE INDEX event_expires_at_idx ON "event" (expires_at);
"#,
            ],
        }
    }
}
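[note] The framework above keeps Postgres schema changes additive: a new migration is a module with the next serial number, plus one `run_migration` call appended in `run_migrations`. A hypothetical m005 (not part of this change; the DDL is purely illustrative) would follow the same shape:

mod m005 {
    use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};

    pub const VERSION: i64 = 5;

    pub fn migration() -> impl Migration {
        SimpleSqlMigration {
            serial_number: VERSION,
            sql: vec![
                r#"
-- illustrative only
CREATE INDEX event_kind_idx ON "event" (kind);
"#,
            ],
        }
    }
}

...with `run_migration(m005::migration(), db).await;` added at the end of `run_migrations`.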
src/repo/sqlite.rs (1121 lines)
@@ -10,24 +10,28 @@ use rusqlite::Connection;
 use std::cmp::Ordering;
 use std::time::Instant;
 use tracing::{debug, error, info};
+use indicatif::{ProgressBar, ProgressStyle};

 /// Startup DB Pragmas
 pub const STARTUP_SQL: &str = r##"
-PRAGMA main.synchronous=NORMAL;
+PRAGMA main.synchronous = NORMAL;
 PRAGMA foreign_keys = ON;
-PRAGMA journal_size_limit=32768;
-pragma mmap_size = 536870912; -- 512MB of mmap
+PRAGMA journal_size_limit = 32768;
+PRAGMA temp_store = 2; -- use memory, not temp files
+PRAGMA main.cache_size = 20000; -- 80MB max cache size per conn
+pragma mmap_size = 17179869184; -- cap mmap at 16GB
 "##;

 /// Latest database version
-pub const DB_VERSION: usize = 9;
+pub const DB_VERSION: usize = 17;

 /// Schema definition
 const INIT_SQL: &str = formatcp!(
     r##"
 -- Database settings
 PRAGMA encoding = "UTF-8";
-PRAGMA journal_mode=WAL;
+PRAGMA journal_mode = WAL;
 PRAGMA auto_vacuum = FULL;
 PRAGMA main.synchronous=NORMAL;
 PRAGMA foreign_keys = ON;
 PRAGMA application_id = 1654008667;
@@ -39,6 +43,7 @@ id INTEGER PRIMARY KEY,
 event_hash BLOB NOT NULL, -- 4-byte hash
 first_seen INTEGER NOT NULL, -- when the event was first seen (not authored!) (seconds since 1970)
 created_at INTEGER NOT NULL, -- when the event was authored
+expires_at INTEGER, -- when the event expires and may be deleted
 author BLOB NOT NULL, -- author pubkey
 delegated_by BLOB, -- delegator pubkey (NIP-26)
 kind INTEGER NOT NULL, -- event kind
@@ -49,24 +54,35 @@ content TEXT NOT NULL -- serialized json of event object
 -- Event Indexes
 CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash);
 CREATE INDEX IF NOT EXISTS author_index ON event(author);
+CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
 CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
 CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
 CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
+CREATE INDEX IF NOT EXISTS kind_author_index ON event(kind,author);
+CREATE INDEX IF NOT EXISTS kind_created_at_index ON event(kind,created_at);
+CREATE INDEX IF NOT EXISTS author_created_at_index ON event(author,created_at);
+CREATE INDEX IF NOT EXISTS author_kind_index ON event(author,kind);
+CREATE INDEX IF NOT EXISTS event_expiration ON event(expires_at);

 -- Tag Table
 -- Tag values are stored as either a BLOB (if they come in as a
 -- hex-string), or TEXT otherwise.
 -- This means that searches need to select the appropriate column.
+-- We duplicate the kind/created_at to make indexes much more efficient.
 CREATE TABLE IF NOT EXISTS tag (
 id INTEGER PRIMARY KEY,
 event_id INTEGER NOT NULL, -- an event ID that contains a tag.
 name TEXT, -- the tag name ("p", "e", whatever)
 value TEXT, -- the tag value, if not hex.
 value_hex BLOB, -- the tag value, if it can be interpreted as a lowercase hex string.
+created_at INTEGER NOT NULL, -- when the event was authored
+kind INTEGER NOT NULL, -- event kind
 FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
 );
 CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
 CREATE INDEX IF NOT EXISTS tag_val_hex_index ON tag(value_hex);
 CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value);
+CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value);
+CREATE INDEX IF NOT EXISTS tag_covering_index ON tag(name,kind,value,created_at,event_id);

 -- NIP-05 User Validation
 CREATE TABLE IF NOT EXISTS user_verification (
@@ -91,7 +107,21 @@ pub fn curr_db_version(conn: &mut Connection) -> Result<usize> {
     Ok(curr_version)
 }

-fn mig_init(conn: &mut PooledConnection) -> Result<usize> {
+/// Determine event count
+pub fn db_event_count(conn: &mut Connection) -> Result<usize> {
+    let query = "SELECT count(*) FROM event;";
+    let count = conn.query_row(query, [], |row| row.get(0))?;
+    Ok(count)
+}
+
+/// Determine tag count
+pub fn db_tag_count(conn: &mut Connection) -> Result<usize> {
+    let query = "SELECT count(*) FROM tag;";
+    let count = conn.query_row(query, [], |row| row.get(0))?;
+    Ok(count)
+}
+
+fn mig_init(conn: &mut PooledConnection) -> usize {
     match conn.execute_batch(INIT_SQL) {
         Ok(()) => {
             info!(
@@ -104,11 +134,11 @@ fn mig_init(conn: &mut PooledConnection) -> Result<usize> {
             panic!("database could not be initialized");
         }
     }
-    Ok(DB_VERSION)
+    DB_VERSION
 }

 /// Upgrade DB to latest version, and execute pragma settings
-pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
+pub fn upgrade_db(conn: &mut PooledConnection) -> Result<usize> {
     // check the version.
     let mut curr_version = curr_db_version(conn)?;
     info!("DB version = {:?}", curr_version);
@@ -119,11 +149,11 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
     );
     debug!(
         "SQLite max table/blob/text length: {} MB",
-        (conn.limit(Limit::SQLITE_LIMIT_LENGTH) as f64 / (1024 * 1024) as f64).floor()
+        (f64::from(conn.limit(Limit::SQLITE_LIMIT_LENGTH)) / f64::from(1024 * 1024)).floor()
     );
     debug!(
         "SQLite max SQL length: {} MB",
-        (conn.limit(Limit::SQLITE_LIMIT_SQL_LENGTH) as f64 / (1024 * 1024) as f64).floor()
+        (f64::from(conn.limit(Limit::SQLITE_LIMIT_SQL_LENGTH)) / f64::from(1024 * 1024)).floor()
     );

     match curr_version.cmp(&DB_VERSION) {
@@ -131,26 +161,22 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
         Ordering::Less => {
             // initialize from scratch
             if curr_version == 0 {
-                curr_version = mig_init(conn)?;
+                curr_version = mig_init(conn);
             }
             // for initialized but out-of-date schemas, proceed to
             // upgrade sequentially until we are current.
             if curr_version == 1 {
                 curr_version = mig_1_to_2(conn)?;
             }
-
             if curr_version == 2 {
                 curr_version = mig_2_to_3(conn)?;
             }
-
             if curr_version == 3 {
                 curr_version = mig_3_to_4(conn)?;
             }
-
             if curr_version == 4 {
                 curr_version = mig_4_to_5(conn)?;
             }
-
             if curr_version == 5 {
                 curr_version = mig_5_to_6(conn)?;
             }
@@ -163,6 +189,30 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
             if curr_version == 8 {
                 curr_version = mig_8_to_9(conn)?;
             }
+            if curr_version == 9 {
+                curr_version = mig_9_to_10(conn)?;
+            }
+            if curr_version == 10 {
+                curr_version = mig_10_to_11(conn)?;
+            }
+            if curr_version == 11 {
+                curr_version = mig_11_to_12(conn)?;
+            }
+            if curr_version == 12 {
+                curr_version = mig_12_to_13(conn)?;
+            }
+            if curr_version == 13 {
+                curr_version = mig_13_to_14(conn)?;
+            }
+            if curr_version == 14 {
+                curr_version = mig_14_to_15(conn)?;
+            }
+            if curr_version == 15 {
+                curr_version = mig_15_to_16(conn)?;
+            }
+            if curr_version == 16 {
+                curr_version = mig_16_to_17(conn)?;
+            }

             if curr_version == DB_VERSION {
                 info!(
@@ -173,13 +223,12 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
         }
         // Database is current, all is good
         Ordering::Equal => {
-            debug!("Database version was already current (v{})", DB_VERSION);
+            debug!("Database version was already current (v{DB_VERSION})");
         }
        // Database is newer than what this code understands, abort
         Ordering::Greater => {
             panic!(
-                "Database version is newer than supported by this executable (v{} > v{})",
-                curr_version, DB_VERSION
+                "Database version is newer than supported by this executable (v{curr_version} > v{DB_VERSION})",
             );
         }
     }
@@ -187,9 +236,65 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
     // Setup PRAGMA
     conn.execute_batch(STARTUP_SQL)?;
     debug!("SQLite PRAGMA startup completed");
+    Ok(DB_VERSION)
 }

+pub fn rebuild_tags(conn: &mut PooledConnection) -> Result<()> {
+    // Check how many events we have to process
+    let count = db_event_count(conn)?;
+    let update_each_percent = 0.05;
+    let mut percent_done = 0.0;
+    let mut events_processed = 0;
+    let start = Instant::now();
+    let tx = conn.transaction()?;
+    {
+        // Clear out table
+        tx.execute("DELETE FROM tag;", [])?;
+        let mut stmt = tx.prepare("select id, content from event order by id;")?;
+        let mut tag_rows = stmt.query([])?;
+        while let Some(row) = tag_rows.next()? {
+            if (events_processed as f32)/(count as f32) > percent_done {
+                info!("Tag update {}% complete...", (100.0*percent_done).round());
+                percent_done += update_each_percent;
+            }
+            // we want to capture the event_id that had the tag, the tag name, and the tag hex value.
+            let event_id: u64 = row.get(0)?;
+            let event_json: String = row.get(1)?;
+            let event: Event = serde_json::from_str(&event_json)?;
+            // look at each event, and each tag, creating new tag entries if appropriate.
+            for t in event.tags.iter().filter(|x| x.len() > 1) {
+                let tagname = t.get(0).unwrap();
+                let tagnamechar_opt = single_char_tagname(tagname);
+                if tagnamechar_opt.is_none() {
+                    continue;
+                }
+                // safe because len was > 1
+                let tagval = t.get(1).unwrap();
+                // insert as BLOB if we can restore it losslessly.
+                // this means it needs to be even length and lowercase.
+                if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
+                    tx.execute(
+                        "INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
+                        params![event_id, tagname, hex::decode(tagval).ok()],
+                    )?;
+                } else {
+                    // otherwise, insert as text
+                    tx.execute(
+                        "INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
+                        params![event_id, tagname, &tagval],
+                    )?;
+                }
+            }
+            events_processed += 1;
+        }
+    }
+    tx.commit()?;
+    info!("rebuilt tags in {:?}", start.elapsed());
+    Ok(())
+}
+
+

 //// Migration Scripts

 fn mig_1_to_2(conn: &mut PooledConnection) -> Result<usize> {
@@ -321,7 +426,6 @@ fn mig_5_to_6(conn: &mut PooledConnection) -> Result<usize> {
     let mut stmt = tx.prepare("select id, content from event order by id;")?;
     let mut tag_rows = stmt.query([])?;
     while let Some(row) = tag_rows.next()? {
-        // we want to capture the event_id that had the tag, the tag name, and the tag hex value.
         let event_id: u64 = row.get(0)?;
         let event_json: String = row.get(1)?;
         let event: Event = serde_json::from_str(&event_json)?;
@@ -363,7 +467,6 @@ fn mig_5_to_6(conn: &mut PooledConnection) -> Result<usize> {

 fn mig_6_to_7(conn: &mut PooledConnection) -> Result<usize> {
     info!("database schema needs update from 6->7");
-    // only change is adding a hidden column to events.
     let upgrade_sql = r##"
 ALTER TABLE event ADD delegated_by BLOB;
 CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
@@ -419,5 +522,234 @@ PRAGMA user_version = 9;
             panic!("database could not be upgraded");
         }
     }
-    Ok(8)
+    Ok(9)
 }
+
+fn mig_9_to_10(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 9->10");
+    // Those old indexes were actually helpful...
+    let upgrade_sql = r##"
+CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value_hex,value);
+PRAGMA user_version = 10;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v9 -> v10");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(10)
+}
+
+fn mig_10_to_11(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 10->11");
+    // Those old indexes were actually helpful...
+    let upgrade_sql = r##"
+CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value_hex);
+reindex;
+pragma optimize;
+PRAGMA user_version = 11;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v10 -> v11");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(11)
+}
+
+fn mig_11_to_12(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 11->12");
+    let start = Instant::now();
+    let tx = conn.transaction()?;
+    {
+        // Lookup every replaceable event
+        let mut stmt = tx.prepare("select kind,author from event where kind in (0,3,41) or (kind>=10000 and kind<20000) order by id;")?;
+        let mut replaceable_rows = stmt.query([])?;
+        info!("updating replaceable events; this could take awhile...");
+        while let Some(row) = replaceable_rows.next()? {
+            // we want to capture the event_id that had the tag, the tag name, and the tag hex value.
+            let event_kind: u64 = row.get(0)?;
+            let event_author: Vec<u8> = row.get(1)?;
+            tx.execute(
+                "UPDATE event SET hidden=TRUE WHERE hidden!=TRUE and kind=? and author=? and id NOT IN (SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1)",
+                params![event_kind, event_author, event_kind, event_author],
+            )?;
+        }
+        tx.execute("PRAGMA user_version = 12;", [])?;
+    }
+    tx.commit()?;
+    info!("database schema upgraded v11 -> v12 in {:?}", start.elapsed());
+    // vacuum after large table modification
+    let start = Instant::now();
+    conn.execute("VACUUM;", [])?;
+    info!("vacuumed DB after hidden event cleanup in {:?}", start.elapsed());
+    Ok(12)
+}
+
+fn mig_12_to_13(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 12->13");
+    let upgrade_sql = r##"
+CREATE INDEX IF NOT EXISTS kind_author_index ON event(kind,author);
+reindex;
+pragma optimize;
+PRAGMA user_version = 13;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v12 -> v13");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(13)
+}
+
+fn mig_13_to_14(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 13->14");
+    let upgrade_sql = r##"
+CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
+CREATE INDEX IF NOT EXISTS kind_created_at_index ON event(kind,created_at);
+pragma optimize;
+PRAGMA user_version = 14;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v13 -> v14");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(14)
+}
+
+fn mig_14_to_15(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 14->15");
+    let upgrade_sql = r##"
+CREATE INDEX IF NOT EXISTS author_created_at_index ON event(author,created_at);
+CREATE INDEX IF NOT EXISTS author_kind_index ON event(author,kind);
+PRAGMA user_version = 15;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v14 -> v15");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    // clear out hidden events
+    let clear_hidden_sql = r##"DELETE FROM event WHERE HIDDEN=true;"##;
+    info!("removing hidden events; this may take awhile...");
+    match conn.execute_batch(clear_hidden_sql) {
+        Ok(()) => {
+            info!("all hidden events removed");
+        },
+        Err(err) => {
+            error!("delete failed: {}", err);
+            panic!("could not remove hidden events");
+        }
+    }
+    Ok(15)
+}
+
+fn mig_15_to_16(conn: &mut PooledConnection) -> Result<usize> {
+    let count = db_event_count(conn)?;
+    info!("database schema needs update from 15->16 (this may take a few minutes)");
+    let upgrade_sql = r##"
+DROP TABLE tag;
+CREATE TABLE tag (
+id INTEGER PRIMARY KEY,
+event_id INTEGER NOT NULL, -- an event ID that contains a tag.
+name TEXT, -- the tag name ("p", "e", whatever)
+value TEXT, -- the tag value, if not hex.
+created_at INTEGER NOT NULL, -- when the event was authored
+kind INTEGER NOT NULL, -- event kind
+FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
+);
+CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
+CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value);
+CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value);
+CREATE INDEX IF NOT EXISTS tag_covering_index ON tag(name,kind,value,created_at,event_id);
+"##;
+
+    let start = Instant::now();
+    let tx = conn.transaction()?;
+
+    let bar = ProgressBar::new(count.try_into().unwrap())
+        .with_message("rebuilding tags table");
+    bar.set_style(
+        ProgressStyle::with_template(
+            "[{elapsed_precise}] {bar:40.white/blue} {pos:>7}/{len:7} [{percent}%] {msg}",
+        )
+        .unwrap(),
+    );
+    {
+        tx.execute_batch(upgrade_sql)?;
+        let mut stmt = tx.prepare("select id, kind, created_at, content from event order by id;")?;
+        let mut tag_rows = stmt.query([])?;
+        let mut count = 0;
+        while let Some(row) = tag_rows.next()? {
+            count += 1;
+            if count%10==0 {
+                bar.inc(10);
+            }
+            let event_id: u64 = row.get(0)?;
+            let kind: u64 = row.get(1)?;
+            let created_at: u64 = row.get(2)?;
+            let event_json: String = row.get(3)?;
+            let event: Event = serde_json::from_str(&event_json)?;
+            // look at each event, and each tag, creating new tag entries if appropriate.
+            for t in event.tags.iter().filter(|x| x.len() > 1) {
+                let tagname = t.get(0).unwrap();
+                let tagnamechar_opt = single_char_tagname(tagname);
+                if tagnamechar_opt.is_none() {
+                    continue;
+                }
+                // safe because len was > 1
+                let tagval = t.get(1).unwrap();
+                // otherwise, insert as text
+                tx.execute(
+                    "INSERT INTO tag (event_id, name, value, kind, created_at) VALUES (?1, ?2, ?3, ?4, ?5);",
+                    params![event_id, tagname, &tagval, kind, created_at],
+                )?;
+            }
+        }
+        tx.execute("PRAGMA user_version = 16;", [])?;
+    }
+    bar.finish();
+    tx.commit()?;
+    info!("database schema upgraded v15 -> v16 in {:?}", start.elapsed());
+    Ok(16)
+}
+
+fn mig_16_to_17(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 16->17");
+    let upgrade_sql = r##"
+ALTER TABLE event ADD COLUMN expires_at INTEGER;
+CREATE INDEX IF NOT EXISTS event_expiration ON event(expires_at);
+PRAGMA user_version = 17;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v16 -> v17");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(17)
+}
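[note] All of these sqlite migrations key off SQLite's `PRAGMA user_version`, which each `mig_N_to_M` bumps as its final statement. A minimal sketch of the read side (the `curr_db_version` that drives the chain is outside this excerpt):

use rusqlite::Connection;

fn read_schema_version(conn: &Connection) -> rusqlite::Result<usize> {
    conn.query_row("PRAGMA user_version;", [], |row| row.get(0))
}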
src/server.rs (577 lines)
@@ -3,17 +3,24 @@ use crate::close::Close;
 use crate::close::CloseCmd;
 use crate::config::{Settings, VerifiedUsersMode};
 use crate::conn;
+use crate::repo::NostrRepo;
 use crate::db;
 use crate::db::SubmittedEvent;
 use crate::error::{Error, Result};
+use crate::event::EventWrapper;
+use crate::server::EventWrapper::{WrappedAuth, WrappedEvent};
 use crate::event::Event;
 use crate::event::EventCmd;
 use crate::info::RelayInfo;
 use crate::nip05;
 use crate::notice::Notice;
 use crate::subscription::Subscription;
+use prometheus::IntCounterVec;
+use prometheus::IntGauge;
+use prometheus::{Encoder, Histogram, IntCounter, HistogramOpts, Opts, Registry, TextEncoder};
 use futures::SinkExt;
 use futures::StreamExt;
 use governor::{Jitter, Quota, RateLimiter};
 use http::header::HeaderMap;
+use hyper::header::ACCEPT;
 use hyper::service::{make_service_fn, service_fn};
@@ -25,8 +32,13 @@ use serde::{Deserialize, Serialize};
 use serde_json::json;
 use std::collections::HashMap;
 use std::convert::Infallible;
+use std::fs::File;
+use std::io::BufReader;
+use std::io::Read;
 use std::net::SocketAddr;
 use std::path::Path;
+use std::sync::Arc;
+use std::sync::atomic::Ordering;
 use std::sync::mpsc::Receiver as MpscReceiver;
 use std::time::Duration;
 use std::time::Instant;
@@ -35,22 +47,27 @@ use tokio::sync::broadcast::{self, Receiver, Sender};
 use tokio::sync::mpsc;
 use tokio::sync::oneshot;
 use tokio_tungstenite::WebSocketStream;
-use tracing::*;
+use tracing::{debug, error, info, trace, warn};
 use tungstenite::error::CapacityError::MessageTooLong;
 use tungstenite::error::Error as WsError;
 use tungstenite::handshake;
 use tungstenite::protocol::Message;
 use tungstenite::protocol::WebSocketConfig;
+use crate::server::Error::CommandUnknownError;

-/// Handle arbitrary HTTP requests, including for WebSocket upgrades.
+/// Handle arbitrary HTTP requests, including for `WebSocket` upgrades.
+#[allow(clippy::too_many_arguments)]
 async fn handle_web_request(
     mut request: Request<Body>,
-    pool: db::SqlitePool,
+    repo: Arc<dyn NostrRepo>,
     settings: Settings,
     remote_addr: SocketAddr,
     broadcast: Sender<Event>,
     event_tx: tokio::sync::mpsc::Sender<SubmittedEvent>,
     shutdown: Receiver<()>,
+    favicon: Option<Vec<u8>>,
+    registry: Registry,
+    metrics: NostrMetrics,
 ) -> Result<Response<Body>, Infallible> {
     match (
         request.uri().path(),
@@ -73,6 +90,7 @@ async fn handle_web_request(
     Ok(upgraded) => {
         // set WebSocket configuration options
         let config = WebSocketConfig {
+            max_send_queue: Some(1024),
             max_message_size: settings.limits.max_ws_message_bytes,
             max_frame_size: settings.limits.max_ws_frame_bytes,
             ..Default::default()
@@ -85,7 +103,8 @@ async fn handle_web_request(
             tokio_tungstenite::tungstenite::protocol::Role::Server,
             Some(config),
         )
-        .await;
+        .await;
+        let origin = get_header_string("origin", request.headers());
         let user_agent = get_header_string("user-agent", request.headers());
         // determine the remote IP from headers if the exist
         let header_ip = settings
@@ -99,24 +118,25 @@ async fn handle_web_request(
         let client_info = ClientInfo {
             remote_ip,
             user_agent,
+            origin,
         };
         // spawn a nostr server with our websocket
         tokio::spawn(nostr_server(
-            pool,
+            repo,
             client_info,
             settings,
             ws_stream,
             broadcast,
             event_tx,
             shutdown,
+            metrics,
         ));
     }
     // todo: trace, don't print...
     Err(e) => println!(
         "error when trying to upgrade connection \
-         from address {} to websocket connection. \
-         Error is: {}",
-        remote_addr, e
+         from address {remote_addr} to websocket connection. \
+         Error is: {e}",
     ),
 }
 });
@@ -126,7 +146,7 @@ async fn handle_web_request(
 Err(error) => {
     warn!("websocket response failed");
     let mut res =
-        Response::new(Body::from(format!("Failed to create websocket: {}", error)));
+        Response::new(Body::from(format!("Failed to create websocket: {error}")));
     *res.status_mut() = StatusCode::BAD_REQUEST;
     return Ok(res);
 }
@@ -144,29 +164,58 @@ async fn handle_web_request(
 if mt_str.contains("application/nostr+json") {
     // build a relay info response
     debug!("Responding to server info request");
-    let rinfo = RelayInfo::from(settings.info);
+    let rinfo = RelayInfo::from(settings);
     let b = Body::from(serde_json::to_string_pretty(&rinfo).unwrap());
     return Ok(Response::builder()
-        .status(200)
-        .header("Content-Type", "application/nostr+json")
-        .header("Access-Control-Allow-Origin", "*")
-        .body(b)
-        .unwrap());
+        .status(200)
+        .header("Content-Type", "application/nostr+json")
+        .header("Access-Control-Allow-Origin", "*")
+        .body(b)
+        .unwrap());
 }
 }
 }
 Ok(Response::builder()
-    .status(200)
-    .header("Content-Type", "text/plain")
-    .body(Body::from("Please use a Nostr client to connect."))
-    .unwrap())
+    .status(200)
+    .header("Content-Type", "text/plain")
+    .body(Body::from("Please use a Nostr client to connect."))
+    .unwrap())
 }
+("/metrics", false) => {
+    let mut buffer = vec![];
+    let encoder = TextEncoder::new();
+    let metric_families = registry.gather();
+    encoder.encode(&metric_families, &mut buffer).unwrap();
+
+    Ok(Response::builder()
+        .status(StatusCode::OK)
+        .header("Content-Type", "text/plain")
+        .body(Body::from(buffer))
+        .unwrap())
+}
+("/favicon.ico", false) => {
+    if let Some(favicon_bytes) = favicon {
+        info!("returning favicon");
+        Ok(Response::builder()
+            .status(StatusCode::OK)
+            .header("Content-Type", "image/x-icon")
+            // 1 month cache
+            .header("Cache-Control", "public, max-age=2419200")
+            .body(Body::from(favicon_bytes))
+            .unwrap())
+    } else {
+        Ok(Response::builder()
+            .status(StatusCode::NOT_FOUND)
+            .body(Body::from(""))
+            .unwrap())
+    }
+}
 (_, _) => {
     //handle any other url
     Ok(Response::builder()
-        .status(StatusCode::NOT_FOUND)
-        .body(Body::from("Nothing here."))
-        .unwrap())
+        .status(StatusCode::NOT_FOUND)
+        .body(Body::from("Nothing here."))
+        .unwrap())
 }
 }
 }
@@ -174,7 +223,7 @@ async fn handle_web_request(
 fn get_header_string(header: &str, headers: &HeaderMap) -> Option<String> {
     headers
         .get(header)
-        .and_then(|x| x.to_str().ok().map(|x| x.to_string()))
+        .and_then(|x| x.to_str().ok().map(std::string::ToString::to_string))
 }

 // return on a control-c or internally requested shutdown signal
@@ -184,24 +233,113 @@ async fn ctrl_c_or_signal(mut shutdown_signal: Receiver<()>) {
 loop {
     tokio::select! {
         _ = shutdown_signal.recv() => {
-        info!("Shutting down webserver as requested");
-        // server shutting down, exit loop
-        break;
-        },
-        _ = tokio::signal::ctrl_c() => {
-        info!("Shutting down webserver due to SIGINT");
-        break;
-        },
-        _ = term_signal.recv() => {
-        info!("Shutting down webserver due to SIGTERM");
-        break;
-        },
+            info!("Shutting down webserver as requested");
+            // server shutting down, exit loop
+            break;
+        },
+        _ = tokio::signal::ctrl_c() => {
+            info!("Shutting down webserver due to SIGINT");
+            break;
+        },
+        _ = term_signal.recv() => {
+            info!("Shutting down webserver due to SIGTERM");
+            break;
+        },
     }
 }
 }

+fn create_metrics() -> (Registry, NostrMetrics) {
+    // setup prometheus registry
+    let registry = Registry::new();
+
+    let query_sub = Histogram::with_opts(HistogramOpts::new(
+        "nostr_query_seconds",
+        "Subscription response times",
+    )).unwrap();
+    let query_db = Histogram::with_opts(HistogramOpts::new(
+        "nostr_filter_seconds",
+        "Filter SQL query times",
+    )).unwrap();
+    let write_events = Histogram::with_opts(HistogramOpts::new(
+        "nostr_events_write_seconds",
+        "Event writing response times",
+    )).unwrap();
+    let sent_events = IntCounterVec::new(
+        Opts::new("nostr_events_sent_total", "Events sent to clients"),
+        vec!["source"].as_slice(),
+    ).unwrap();
+    let connections = IntCounter::with_opts(Opts::new(
+        "nostr_connections_total",
+        "New connections",
+    )).unwrap();
+    let db_connections = IntGauge::with_opts(Opts::new(
+        "nostr_db_connections", "Active database connections"
+    )).unwrap();
+    let query_aborts = IntCounterVec::new(
+        Opts::new("nostr_query_abort_total", "Aborted queries"),
+        vec!["reason"].as_slice(),
+    ).unwrap();
+    let cmd_req = IntCounter::with_opts(Opts::new(
+        "nostr_cmd_req_total",
+        "REQ commands",
+    )).unwrap();
+    let cmd_event = IntCounter::with_opts(Opts::new(
+        "nostr_cmd_event_total",
+        "EVENT commands",
+    )).unwrap();
+    let cmd_close = IntCounter::with_opts(Opts::new(
+        "nostr_cmd_close_total",
+        "CLOSE commands",
+    )).unwrap();
+    let cmd_auth = IntCounter::with_opts(Opts::new(
+        "nostr_cmd_auth_total",
+        "AUTH commands",
+    )).unwrap();
+    let disconnects = IntCounterVec::new(
+        Opts::new("nostr_disconnects_total", "Client disconnects"),
+        vec!["reason"].as_slice(),
+    ).unwrap();
+    registry.register(Box::new(query_sub.clone())).unwrap();
+    registry.register(Box::new(query_db.clone())).unwrap();
+    registry.register(Box::new(write_events.clone())).unwrap();
+    registry.register(Box::new(sent_events.clone())).unwrap();
+    registry.register(Box::new(connections.clone())).unwrap();
+    registry.register(Box::new(db_connections.clone())).unwrap();
+    registry.register(Box::new(query_aborts.clone())).unwrap();
+    registry.register(Box::new(cmd_req.clone())).unwrap();
+    registry.register(Box::new(cmd_event.clone())).unwrap();
+    registry.register(Box::new(cmd_close.clone())).unwrap();
+    registry.register(Box::new(cmd_auth.clone())).unwrap();
+    registry.register(Box::new(disconnects.clone())).unwrap();
+    let metrics = NostrMetrics {
+        query_sub,
+        query_db,
+        write_events,
+        sent_events,
+        connections,
+        db_connections,
+        disconnects,
+        query_aborts,
+        cmd_req,
+        cmd_event,
+        cmd_close,
+        cmd_auth,
+    };
+    (registry, metrics)
+}
+
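[note] `create_metrics` builds each instrument once, registers clones with the registry that the `/metrics` handler gathers from, and hands the originals to the rest of the server via `NostrMetrics`. Call sites then simply observe; a small sketch matching the usage visible elsewhere in this diff:

// time a subscription query
let start = std::time::Instant::now();
// ... run the query ...
metrics.query_sub.observe(start.elapsed().as_secs_f64());

// bump a labeled counter
metrics.query_aborts.with_label_values(&["slowclient"]).inc();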
+fn file_bytes(path: &str) -> Result<Vec<u8>> {
+    let f = File::open(path)?;
+    let mut reader = BufReader::new(f);
+    let mut buffer = Vec::new();
+    // Read file into vector.
+    reader.read_to_end(&mut buffer)?;
+    Ok(buffer)
+}
+
 /// Start running a Nostr relay server.
-pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result<(), Error> {
+pub fn start_server(settings: &Settings, shutdown_rx: MpscReceiver<()>) -> Result<(), Error> {
     trace!("Config: {:?}", settings);
     // do some config validation.
     if !Path::new(&settings.database.data_directory).is_dir() {
@@ -243,7 +381,20 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
     // configure tokio runtime
     let rt = Builder::new_multi_thread()
         .enable_all()
-        .thread_name("tokio-ws")
+        .thread_name_fn(|| {
+            // give each thread a unique numeric name
+            static ATOMIC_ID: std::sync::atomic::AtomicUsize = std::sync::atomic::AtomicUsize::new(0);
+            let id = ATOMIC_ID.fetch_add(1,Ordering::SeqCst);
+            format!("tokio-ws-{id}")
+        })
+        // limit concurrent SQLite blocking threads
+        .max_blocking_threads(settings.limits.max_blocking_threads)
+        .on_thread_start(|| {
+            trace!("started new thread: {:?}", std::thread::current().name());
+        })
+        .on_thread_stop(|| {
+            trace!("stopped thread: {:?}", std::thread::current().name());
+        })
         .build()
         .unwrap();
     // start tokio
@@ -251,8 +402,6 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
     let broadcast_buffer_limit = settings.limits.broadcast_buffer;
     let persist_buffer_limit = settings.limits.event_persist_buffer;
     let verified_users_active = settings.verified_users.is_active();
-    let db_min_conn = settings.database.min_conn;
-    let db_max_conn = settings.database.max_conn;
+    let settings = settings.clone();
     info!("listening on: {}", socket_addr);
     // all client-submitted valid events are broadcast to every
@@ -275,23 +424,28 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
     // overwhelming this will drop events and won't register
     // metadata events.
     let (metadata_tx, metadata_rx) = broadcast::channel::<Event>(4096);
-    // start the database writer thread. Give it a channel for
+
+    let (registry, metrics) = create_metrics();
+    // build a repository for events
+    let repo = db::build_repo(&settings, metrics.clone()).await;
+    // start the database writer task. Give it a channel for
     // writing events, and for publishing events that have been
     // written (to all connected clients).
-    db::db_writer(
-        settings.clone(),
-        event_rx,
-        bcast_tx.clone(),
-        metadata_tx.clone(),
-        shutdown_listen,
-    )
-    .await;
+    tokio::task::spawn(
+        db::db_writer(
+            repo.clone(),
+            settings.clone(),
+            event_rx,
+            bcast_tx.clone(),
+            metadata_tx.clone(),
+            shutdown_listen,
+        ));
     info!("db writer created");

     // create a nip-05 verifier thread; if enabled.
     if settings.verified_users.mode != VerifiedUsersMode::Disabled {
         let verifier_opt =
-            nip05::Verifier::new(metadata_rx, bcast_tx.clone(), settings.clone());
+            nip05::Verifier::new(repo.clone(), metadata_rx, bcast_tx.clone(), settings.clone());
         if let Ok(mut v) = verifier_opt {
             if verified_users_active {
                 tokio::task::spawn(async move {
@@ -301,6 +455,7 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
             }
         }
     }
+
     // listen for (external to tokio) shutdown request
     let controlled_shutdown = invoke_shutdown.clone();
     tokio::spawn(async move {
@@ -309,10 +464,9 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
         Ok(()) => {
             info!("control message requesting shutdown");
             controlled_shutdown.send(()).ok();
-        }
+        },
         Err(std::sync::mpsc::RecvError) => {
-            // FIXME: spurious error on startup?
-            debug!("shutdown requestor is disconnected");
+            trace!("shutdown requestor is disconnected (this is normal)");
         }
     };
     });
@@ -326,36 +480,42 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
     info!("shutting down due to SIGINT (main)");
     ctrl_c_shutdown.send(()).ok();
     });
-    // build a connection pool for sqlite connections
-    let pool = db::build_pool(
-        "client query",
-        &settings,
-        rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY
-            | rusqlite::OpenFlags::SQLITE_OPEN_SHARED_CACHE,
-        db_min_conn,
-        db_max_conn,
-        true,
-    );
-    // spawn a task to check the pool size.
-    //let pool_monitor = pool.clone();
-    //tokio::spawn(async move {db::monitor_pool("reader", pool_monitor).await;});
+
+    // Read in the favicon if it exists
+    let favicon = settings.info.favicon.as_ref().and_then(|x| {
+        info!("reading favicon...");
+        file_bytes(x).ok()
+    });

     // A `Service` is needed for every connection, so this
     // creates one from our `handle_request` function.
     let make_svc = make_service_fn(|conn: &AddrStream| {
-        let svc_pool = pool.clone();
+        let repo = repo.clone();
         let remote_addr = conn.remote_addr();
         let bcast = bcast_tx.clone();
         let event = event_tx.clone();
         let stop = invoke_shutdown.clone();
+        let settings = settings.clone();
+        let favicon = favicon.clone();
+        let registry = registry.clone();
+        let metrics = metrics.clone();
         async move {
             // service_fn converts our function into a `Service`
             Ok::<_, Infallible>(service_fn(move |request: Request<Body>| {
                 handle_web_request(
                     request,
-                    svc_pool.clone(),
+                    repo.clone(),
                     settings.clone(),
                     remote_addr,
                     bcast.clone(),
                     event.clone(),
                     stop.subscribe(),
+                    favicon.clone(),
+                    registry.clone(),
+                    metrics.clone(),
                 )
             }))
         }
@@ -365,7 +525,7 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
         .with_graceful_shutdown(ctrl_c_or_signal(webserver_shutdown_listen));
     // run hyper in this thread. This is why the thread does not return.
     if let Err(e) = server.await {
-        eprintln!("server error: {}", e);
+        eprintln!("server error: {e}");
     }
     });
     Ok(())
@@ -375,7 +535,7 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
 #[derive(Deserialize, Serialize, Clone, PartialEq, Eq, Debug)]
 #[serde(untagged)]
 pub enum NostrMessage {
-    /// An `EVENT` message
+    /// `EVENT` and `AUTH` messages
     EventMsg(EventCmd),
     /// A `REQ` message
     SubMsg(Subscription),
@@ -383,11 +543,15 @@ pub enum NostrMessage {
     CloseMsg(CloseCmd),
 }

-/// Convert Message to NostrMessage
-fn convert_to_msg(msg: String, max_bytes: Option<usize>) -> Result<NostrMessage> {
-    let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
+/// Convert Message to `NostrMessage`
+fn convert_to_msg(msg: &str, max_bytes: Option<usize>) -> Result<NostrMessage> {
+    let parsed_res: Result<NostrMessage> = serde_json::from_str(msg).map_err(std::convert::Into::into);
     match parsed_res {
         Ok(m) => {
+            if let NostrMessage::SubMsg(_) = m {
+                // note; this only prints the first 16k of a REQ and then truncates.
+                trace!("REQ: {:?}",msg);
+            };
             if let NostrMessage::EventMsg(_) = m {
                 if let Some(max_size) = max_bytes {
                     // check length, ensure that some max size is set.
@@ -399,18 +563,19 @@ fn convert_to_msg(msg: String, max_bytes: Option<usize>) -> Result<NostrMessage>
             Ok(m)
         }
         Err(e) => {
-            debug!("proto parse error: {:?}", e);
-            debug!("parse error on message: {}", msg.trim());
+            trace!("proto parse error: {:?}", e);
+            trace!("parse error on message: {:?}", msg.trim());
             Err(Error::ProtoParseError)
         }
     }
 }
|
||||
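For illustration, convert_to_msg leans on serde's untagged deserialization, which tries each NostrMessage variant in order; conceptually this amounts to keying off the first element of the client's JSON array. A minimal standalone sketch of that dispatch using plain serde_json (the function name and return values here are hypothetical, not the relay's):

use serde_json::Value;

fn classify(msg: &str) -> Option<&'static str> {
    let v: Value = serde_json::from_str(msg).ok()?;
    match v.get(0).and_then(Value::as_str) {
        // EventCmd covers both EVENT and AUTH after this change
        Some("EVENT" | "AUTH") => Some("event"),
        Some("REQ") => Some("subscription"),
        Some("CLOSE") => Some("close"),
        _ => None,
    }
}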
/// Turn a string into a NOTICE message ready to send over a WebSocket
fn make_notice_message(notice: Notice) -> Message {
/// Turn a string into a NOTICE message ready to send over a `WebSocket`
fn make_notice_message(notice: &Notice) -> Message {
let json = match notice {
Notice::Message(ref msg) => json!(["NOTICE", msg]),
Notice::EventResult(ref res) => json!(["OK", res.id, res.status.to_bool(), res.msg]),
Notice::AuthChallenge(ref challenge) => json!(["AUTH", challenge]),
};

Message::text(json.to_string())
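On the wire, those three arms produce frames of the following shapes (values illustrative):

["NOTICE","rate limited"]
["OK","<event id>",true,""]
["AUTH","<challenge string>"]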
@@ -419,18 +584,21 @@ fn make_notice_message(notice: Notice) -> Message {
struct ClientInfo {
remote_ip: String,
user_agent: Option<String>,
origin: Option<String>,
}

/// Handle new client connections. This runs through an event loop
/// for all client communication.
#[allow(clippy::too_many_arguments)]
async fn nostr_server(
pool: db::SqlitePool,
repo: Arc<dyn NostrRepo>,
client_info: ClientInfo,
settings: Settings,
mut ws_stream: WebSocketStream<Upgraded>,
broadcast: Sender<Event>,
event_tx: mpsc::Sender<SubmittedEvent>,
mut shutdown: Receiver<()>,
metrics: NostrMetrics,
) {
// the time this websocket nostr server started
let orig_start = Instant::now();
@@ -438,13 +606,28 @@ async fn nostr_server(
let mut bcast_rx = broadcast.subscribe();
// Track internal client state
let mut conn = conn::ClientConn::new(client_info.remote_ip);
// subscription creation rate limiting
let mut sub_lim_opt = None;
// 100ms jitter when the rate limiter returns
let jitter = Jitter::up_to(Duration::from_millis(100));
let sub_per_min_setting = settings.limits.subscriptions_per_min;
if let Some(sub_per_min) = sub_per_min_setting {
if sub_per_min > 0 {
trace!("Rate limits for sub creation ({}/min)", sub_per_min);
let quota_time = core::num::NonZeroU32::new(sub_per_min).unwrap();
let quota = Quota::per_minute(quota_time);
sub_lim_opt = Some(RateLimiter::direct(quota));
}
}
// Use the remote IP as the client identifier
let cid = conn.get_client_prefix();
// Create a channel for receiving query results from the database.
// we will send out the tx handle to any query we generate.
let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(256);
// this has capacity for some of the larger requests we see, which
// should allow the DB thread to release the handle earlier.
let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(20_000);
// Create channel for receiving NOTICEs
let (notice_tx, mut notice_rx) = mpsc::channel::<Notice>(32);
let (notice_tx, mut notice_rx) = mpsc::channel::<Notice>(128);

// last time this client sent data (message, ping, etc.)
let mut last_message_time = Instant::now();
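A minimal standalone sketch of the limiter configured above, assuming the governor crate that provides Quota, RateLimiter, and Jitter (the function name and per-minute value are illustrative):

use std::num::NonZeroU32;
use std::time::Duration;
use governor::{Jitter, Quota, RateLimiter};

async fn wait_for_sub_permit() {
    // e.g. at most 10 new subscriptions per minute for this connection
    let quota = Quota::per_minute(NonZeroU32::new(10).unwrap());
    let limiter = RateLimiter::direct(quota);
    let jitter = Jitter::up_to(Duration::from_millis(100));
    // resolves once a permit is available, plus up to 100ms of jitter
    limiter.until_ready_with_jitter(jitter).await;
}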
@@ -462,20 +645,38 @@ async fn nostr_server(
// when these subscriptions are cancelled, make a message
// available to the executing query so it knows to stop.
let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
// keep track of the subscriptions we have
let mut current_subs: Vec<Subscription> = Vec::new();
// for stats, keep track of how many events the client published,
// and how many it received from queries.
let mut client_published_event_count: usize = 0;
let mut client_received_event_count: usize = 0;
debug!("new client connection (cid: {}, ip: {:?})", cid, conn.ip());
if let Some(ua) = client_info.user_agent {
debug!("cid: {}, user-agent: {:?}", cid, ua);

let unspec = "<unspecified>".to_string();
info!("new client connection (cid: {}, ip: {:?})", cid, conn.ip());
let origin = client_info.origin.as_ref().unwrap_or_else(|| &unspec);
let user_agent = client_info
.user_agent.as_ref()
.unwrap_or_else(|| &unspec);
info!(
"cid: {}, origin: {:?}, user-agent: {:?}",
cid, origin, user_agent
);

// Measure connections
metrics.connections.inc();

if settings.authorization.nip42_auth {
conn.generate_auth_challenge();
if let Some(challenge) = conn.auth_challenge() {
ws_stream.send(
make_notice_message(&Notice::AuthChallenge(challenge.to_string()))).await.ok();
}
}

loop {
tokio::select! {
_ = shutdown.recv() => {
info!("Close connection down due to shutdown, client: {}, ip: {:?}, connected: {:?}", cid, conn.ip(), orig_start.elapsed());
metrics.disconnects.with_label_values(&["shutdown"]).inc();
info!("Close connection down due to shutdown, client: {}, ip: {:?}, connected: {:?}", cid, conn.ip(), orig_start.elapsed());
// server shutting down, exit loop
break;
},
@@ -484,22 +685,24 @@ async fn nostr_server(
// if it has been too long, disconnect
if last_message_time.elapsed() > max_quiet_time {
debug!("ending connection due to lack of client ping response");
metrics.disconnects.with_label_values(&["timeout"]).inc();
break;
}
// Send a ping
ws_stream.send(Message::Ping(Vec::new())).await.ok();
},
Some(notice_msg) = notice_rx.recv() => {
ws_stream.send(make_notice_message(notice_msg)).await.ok();
ws_stream.send(make_notice_message(&notice_msg)).await.ok();
},
Some(query_result) = query_rx.recv() => {
// database informed us of a query result we asked for
let subesc = query_result.sub_id.replace('"', "");
if query_result.event == "EOSE" {
let send_str = format!("[\"EOSE\",\"{}\"]", subesc);
let send_str = format!("[\"EOSE\",\"{subesc}\"]");
ws_stream.send(Message::Text(send_str)).await.ok();
} else {
client_received_event_count += 1;
metrics.sent_events.with_label_values(&["db"]).inc();
// send a result
let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
ws_stream.send(Message::Text(send_str)).await.ok();
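For a subscription "sub1", the two branches above emit frames of this shape (event body illustrative), with EOSE marking the end of stored database results before realtime delivery takes over:

["EVENT","sub1",{"id":"...","kind":1,...}]
["EOSE","sub1"]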
@@ -513,16 +716,16 @@ async fn nostr_server(
if !sub.interested_in_event(&global_event) {
continue;
}

// TODO: serialize at broadcast time, instead of
// once for each consumer.
if let Ok(event_str) = serde_json::to_string(&global_event) {
debug!("sub match for client: {}, sub: {:?}, event: {:?}",
trace!("sub match for client: {}, sub: {:?}, event: {:?}",
cid, s,
global_event.get_event_id_prefix());
// create an event response and send it
let subesc = s.replace('"', "");
ws_stream.send(Message::Text(format!("[\"EVENT\",\"{}\",{}]", subesc, event_str))).await.ok();
metrics.sent_events.with_label_values(&["realtime"]).inc();
ws_stream.send(Message::Text(format!("[\"EVENT\",\"{subesc}\",{event_str}]"))).await.ok();
} else {
warn!("could not serialize event: {:?}", global_event.get_event_id_prefix());
}
@@ -534,11 +737,11 @@ async fn nostr_server(
// Consume text messages from the client, parse into Nostr messages.
let nostr_msg = match ws_next {
Some(Ok(Message::Text(m))) => {
convert_to_msg(m,settings.limits.max_event_bytes)
convert_to_msg(&m,settings.limits.max_event_bytes)
},
Some(Ok(Message::Binary(_))) => {
ws_stream.send(
make_notice_message(Notice::message("binary messages are not accepted".into()))).await.ok();
Some(Ok(Message::Binary(_))) => {
ws_stream.send(
make_notice_message(&Notice::message("binary messages are not accepted".into()))).await.ok();
continue;
},
Some(Ok(Message::Ping(_) | Message::Pong(_))) => {
@@ -546,27 +749,32 @@ async fn nostr_server(
// send responses automatically.
continue;
},
Some(Err(WsError::Capacity(MessageTooLong{size, max_size}))) => {
ws_stream.send(
make_notice_message(Notice::message(format!("message too large ({} > {})",size, max_size)))).await.ok();
Some(Err(WsError::Capacity(MessageTooLong{size, max_size}))) => {
ws_stream.send(
make_notice_message(&Notice::message(format!("message too large ({size} > {max_size})")))).await.ok();
continue;
},
},
None |
Some(Ok(Message::Close(_)) |
Err(WsError::AlreadyClosed | WsError::ConnectionClosed |
WsError::Protocol(tungstenite::error::ProtocolError::ResetWithoutClosingHandshake)))
Some(Ok(Message::Close(_)) |
Err(WsError::AlreadyClosed | WsError::ConnectionClosed |
WsError::Protocol(tungstenite::error::ProtocolError::ResetWithoutClosingHandshake)))
=> {
debug!("websocket close from client (cid: {}, ip: {:?})",cid, conn.ip());
break;
},
metrics.disconnects.with_label_values(&["normal"]).inc();
break;
},
Some(Err(WsError::Io(e))) => {
// IO errors are considered fatal
warn!("IO error (cid: {}, ip: {:?}): {:?}", cid, conn.ip(), e);
metrics.disconnects.with_label_values(&["error"]).inc();

break;
}
x => {
// default condition on error is to close the client connection
info!("unknown error (cid: {}, ip: {:?}): {:?} (closing conn)", cid, conn.ip(), x);
metrics.disconnects.with_label_values(&["error"]).inc();

break;
}
};
@@ -577,94 +785,140 @@ async fn nostr_server(
// An EventCmd needs to be validated to be converted into an Event
// handle each type of message
let evid = ec.event_id().to_owned();
let parsed : Result<Event> = Result::<Event>::from(ec);
let parsed : Result<EventWrapper> = Result::<EventWrapper>::from(ec);
match parsed {
Ok(e) => {
Ok(WrappedEvent(e)) => {
metrics.cmd_event.inc();
let id_prefix:String = e.id.chars().take(8).collect();
debug!("successfully parsed/validated event: {:?} (cid: {})", id_prefix, cid);
// check if the event is too far in the future.
if e.is_valid_timestamp(settings.options.reject_future_seconds) {
debug!("successfully parsed/validated event: {:?} (cid: {}, kind: {})", id_prefix, cid, e.kind);
// check if event is expired
if e.is_expired() {
let notice = Notice::invalid(e.id, "The event has already expired");
ws_stream.send(make_notice_message(&notice)).await.ok();
// check if the event is too far in the future.
} else if e.is_valid_timestamp(settings.options.reject_future_seconds) {
// Write this to the database.
let submit_event = SubmittedEvent { event: e.clone(), notice_tx: notice_tx.clone() };
let auth_pubkey = conn.auth_pubkey().and_then(|pubkey| hex::decode(&pubkey).ok());
let submit_event = SubmittedEvent {
event: e.clone(),
notice_tx: notice_tx.clone(),
source_ip: conn.ip().to_string(),
origin: client_info.origin.clone(),
user_agent: client_info.user_agent.clone(),
auth_pubkey };
event_tx.send(submit_event).await.ok();
client_published_event_count += 1;
} else {
info!("client: {} sent a far future-dated event", cid);
if let Some(fut_sec) = settings.options.reject_future_seconds {
let msg = format!("The event created_at field is out of the acceptable range (+{}sec) for this relay.",fut_sec);
let notice = Notice::invalid(e.id, &msg);
ws_stream.send(make_notice_message(notice)).await.ok();
}
}
} else {
info!("client: {} sent a far future-dated event", cid);
if let Some(fut_sec) = settings.options.reject_future_seconds {
let msg = format!("The event created_at field is out of the acceptable range (+{fut_sec}sec) for this relay.");
let notice = Notice::invalid(e.id, &msg);
ws_stream.send(make_notice_message(&notice)).await.ok();
}
}
},
Ok(WrappedAuth(event)) => {
metrics.cmd_auth.inc();
if settings.authorization.nip42_auth {
let id_prefix:String = event.id.chars().take(8).collect();
debug!("successfully parsed auth: {:?} (cid: {})", id_prefix, cid);
match &settings.info.relay_url {
None => {
error!("AUTH command received, but relay_url is not set in the config file (cid: {})", cid);
},
Some(relay) => {
match conn.authenticate(&event, &relay) {
Ok(_) => {
let pubkey = match conn.auth_pubkey() {
Some(k) => k.chars().take(8).collect(),
None => "<unspecified>".to_string(),
};
info!("client is authenticated: (cid: {}, pubkey: {:?})", cid, pubkey);
},
Err(e) => {
info!("authentication error: {} (cid: {})", e, cid);
ws_stream.send(make_notice_message(&Notice::message(format!("Authentication error: {e}")))).await.ok();
},
}
}
}
} else {
let e = CommandUnknownError;
info!("client sent an invalid event (cid: {})", cid);
ws_stream.send(make_notice_message(&Notice::invalid(evid, &format!("{e}")))).await.ok();
}
},
Err(e) => {
metrics.cmd_event.inc();
info!("client sent an invalid event (cid: {})", cid);
ws_stream.send(make_notice_message(Notice::invalid(evid, &format!("{}", e)))).await.ok();
ws_stream.send(make_notice_message(&Notice::invalid(evid, &format!("{e}")))).await.ok();
}
}
},
Ok(NostrMessage::SubMsg(s)) => {
debug!("subscription requested (cid: {}, sub: {:?})", cid, s.id);
// subscription handling consists of:
// * check for rate limits
// * registering the subscription so future events can be matched
// * making a channel to cancel to request later
// * sending a request for a SQL query
// Do nothing if the sub already exists.
if !current_subs.contains(&s) {
current_subs.push(s.clone());
// Do nothing if the sub already exists.
if conn.has_subscription(&s) {
info!("client sent duplicate subscription, ignoring (cid: {}, sub: {:?})", cid, s.id);
} else {
metrics.cmd_req.inc();
if let Some(ref lim) = sub_lim_opt {
lim.until_ready_with_jitter(jitter).await;
}
let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
match conn.subscribe(s.clone()) {
Ok(()) => {
Ok(()) => {
// when we insert, if there was a previous query running with the same name, cancel it.
if let Some(previous_query) = running_queries.insert(s.id.to_owned(), abandon_query_tx) {
previous_query.send(()).ok();
if let Some(previous_query) = running_queries.insert(s.id.clone(), abandon_query_tx) {
previous_query.send(()).ok();
}
// start a database query
db::db_query(s, cid.to_owned(), pool.clone(), query_tx.clone(), abandon_query_rx).await;
},
Err(e) => {
info!("Subscription error: {}", e);
ws_stream.send(make_notice_message(Notice::message(format!("Subscription error: {}", e)))).await.ok();
}
if s.needs_historical_events() {
// start a database query. this spawns a blocking database query on a worker thread.
repo.query_subscription(s, cid.clone(), query_tx.clone(), abandon_query_rx).await.ok();
}
},
Err(e) => {
info!("Subscription error: {} (cid: {}, sub: {:?})", e, cid, s.id);
ws_stream.send(make_notice_message(&Notice::message(format!("Subscription error: {e}")))).await.ok();
}
}
} else {
info!("client send duplicate subscription, ignoring (cid: {}, sub: {:?})", cid, s.id);
}
}
},
Ok(NostrMessage::CloseMsg(cc)) => {
// closing a request simply removes the subscription.
let parsed : Result<Close> = Result::<Close>::from(cc);
if let Ok(c) = parsed {
// remove from the list of known subs
if let Some(pos) = current_subs.iter().position(|s| *s.id == c.id) {
current_subs.remove(pos);
}

// check if a query is currently
// running, and remove it if so.
let stop_tx = running_queries.remove(&c.id);
if let Some(tx) = stop_tx {
tx.send(()).ok();
}
// stop checking new events against
// the subscription
conn.unsubscribe(&c);
} else {
info!("invalid command ignored");
ws_stream.send(make_notice_message(Notice::message("could not parse command".into()))).await.ok();
if let Ok(c) = parsed {
metrics.cmd_close.inc();
// check if a query is currently
// running, and remove it if so.
let stop_tx = running_queries.remove(&c.id);
if let Some(tx) = stop_tx {
tx.send(()).ok();
}
// stop checking new events against
// the subscription
conn.unsubscribe(&c);
} else {
info!("invalid command ignored");
ws_stream.send(make_notice_message(&Notice::message("could not parse command".into()))).await.ok();
}
},
Err(Error::ConnError) => {
debug!("got connection close/error, disconnecting cid: {}, ip: {:?}",cid, conn.ip());
break;
}
Err(Error::EventMaxLengthError(s)) => {
info!("client sent event larger ({} bytes) than max size (cid: {})", s, cid);
ws_stream.send(make_notice_message(Notice::message("event exceeded max size".into()))).await.ok();
info!("client sent command larger ({} bytes) than max size (cid: {})", s, cid);
ws_stream.send(make_notice_message(&Notice::message("event exceeded max size".into()))).await.ok();
},
Err(Error::ProtoParseError) => {
info!("client sent event that could not be parsed (cid: {})", cid);
ws_stream.send(make_notice_message(Notice::message("could not parse command".into()))).await.ok();
info!("client sent command that could not be parsed (cid: {})", cid);
ws_stream.send(make_notice_message(&Notice::message("could not parse command".into()))).await.ok();
},
Err(e) => {
info!("got non-fatal error from client (cid: {}, error: {:?}", cid, e);
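For reference on the AUTH branch above: a valid NIP-42 response is an event of kind 22242 whose tags echo this relay's URL and the exact challenge the relay issued, with a recent created_at. conn.authenticate rejects a wrong kind, a stale or future timestamp, or a mismatched relay/challenge tag, as exercised by tests/conn.rs below. An illustrative client frame:

["AUTH",{"kind":22242,"tags":[["relay","wss://nostr.example.com/"],["challenge","<server challenge>"]],...}]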
@@ -683,6 +937,23 @@ async fn nostr_server(
conn.ip(),
client_published_event_count,
client_received_event_count,
orig_start.elapsed()
orig_start.elapsed()
);
}

#[derive(Clone)]
pub struct NostrMetrics {
pub query_sub: Histogram, // response time of successful subscriptions
pub query_db: Histogram, // individual database query execution time
pub db_connections: IntGauge, // database connections in use
pub write_events: Histogram, // response time of event writes
pub sent_events: IntCounterVec, // count of events sent to clients
pub connections: IntCounter, // count of websocket connections
pub disconnects: IntCounterVec, // client disconnects
pub query_aborts: IntCounterVec, // count of queries aborted by server
pub cmd_req: IntCounter, // count of REQ commands received
pub cmd_event: IntCounter, // count of EVENT commands received
pub cmd_close: IntCounter, // count of CLOSE commands received
pub cmd_auth: IntCounter, // count of AUTH commands received

}
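A minimal sketch of how one of these fields is typically constructed and registered with the prometheus crate (create_metrics itself is not shown in this diff; the metric name below is hypothetical):

use prometheus::{IntCounter, Registry};

fn sketch() -> prometheus::Result<(Registry, IntCounter)> {
    let registry = Registry::new();
    let connections = IntCounter::new("nostr_connections", "websocket connections")?;
    // the registry backs the /metrics endpoint; the counter handle is
    // cloned into NostrMetrics and incremented from the server loop
    registry.register(Box::new(connections.clone()))?;
    Ok((registry, connections))
}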
src/subscription.rs
@@ -2,7 +2,8 @@
use crate::error::Result;
use crate::event::Event;
use serde::de::Unexpected;
use serde::{Deserialize, Deserializer, Serialize};
use serde::ser::SerializeMap;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_json::Value;
use std::collections::HashMap;
use std::collections::HashSet;
@@ -19,7 +20,7 @@ pub struct Subscription {
/// Corresponds to client-provided subscription request elements. Any
/// element can be present if it should be used in filtering, or
/// absent ([`None`]) if it should be ignored.
#[derive(Serialize, PartialEq, Eq, Debug, Clone)]
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct ReqFilter {
/// Event hashes
pub ids: Option<Vec<String>>,
@@ -34,7 +35,6 @@ pub struct ReqFilter {
/// Limit number of results
pub limit: Option<u64>,
/// Set of tags
#[serde(skip)]
pub tags: Option<HashMap<char, HashSet<String>>>,
/// Force no matches due to malformed data
// we can't represent it in the req filter, so we don't want to
@@ -43,6 +43,40 @@ pub struct ReqFilter {
pub force_no_match: bool,
}

impl Serialize for ReqFilter {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S:Serializer,
{
let mut map = serializer.serialize_map(None)?;
if let Some(ids) = &self.ids {
map.serialize_entry("ids", &ids)?;
}
if let Some(kinds) = &self.kinds {
map.serialize_entry("kinds", &kinds)?;
}
if let Some(until) = &self.until {
map.serialize_entry("until", until)?;
}
if let Some(since) = &self.since {
map.serialize_entry("since", since)?;
}
if let Some(limit) = &self.limit {
map.serialize_entry("limit", limit)?;
}
if let Some(authors) = &self.authors {
map.serialize_entry("authors", &authors)?;
}
// serialize tags
if let Some(tags) = &self.tags {
for (k,v) in tags {
let vals:Vec<&String> = v.iter().collect();
map.serialize_entry(&format!("#{k}"), &vals)?;
}
}
map.end()
}
}

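The flatten-tags-into-"#k" pattern from that impl, isolated as a self-contained sketch (the Tags wrapper type and main function are hypothetical):

use serde::ser::{Serialize, SerializeMap, Serializer};
use std::collections::{HashMap, HashSet};

struct Tags(HashMap<char, HashSet<String>>);

impl Serialize for Tags {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        let mut map = serializer.serialize_map(None)?;
        for (k, v) in &self.0 {
            // each tag set becomes a JSON key like "#e" with an array value
            let vals: Vec<&String> = v.iter().collect();
            map.serialize_entry(&format!("#{k}"), &vals)?;
        }
        map.end()
    }
}

fn main() {
    let mut tags = HashMap::new();
    tags.insert('e', HashSet::from(["abc".to_string()]));
    let json = serde_json::to_string(&Tags(tags)).unwrap();
    assert_eq!(json, r##"{"#e":["abc"]}"##);
}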
impl<'de> Deserialize<'de> for ReqFilter {
fn deserialize<D>(deserializer: D) -> Result<ReqFilter, D::Error>
where
@@ -65,12 +99,21 @@ impl<'de> Deserialize<'de> for ReqFilter {
tags: None,
force_no_match: false,
};
let empty_string = "".into();
let mut ts = None;
// iterate through each key, and assign values that exist
for (key, val) in filter.into_iter() {
for (key, val) in filter {
// ids
if key == "ids" {
rf.ids = Deserialize::deserialize(val).ok();
let raw_ids: Option<Vec<String>>= Deserialize::deserialize(val).ok();
if let Some(a) = raw_ids.as_ref() {
if a.contains(&empty_string) {
return Err(serde::de::Error::invalid_type(
Unexpected::Other("prefix matches must not be empty strings"),
&"a json object"));
}
}
rf.ids =raw_ids;
} else if key == "kinds" {
rf.kinds = Deserialize::deserialize(val).ok();
} else if key == "since" {
@@ -80,7 +123,15 @@ impl<'de> Deserialize<'de> for ReqFilter {
} else if key == "limit" {
rf.limit = Deserialize::deserialize(val).ok();
} else if key == "authors" {
rf.authors = Deserialize::deserialize(val).ok();
let raw_authors: Option<Vec<String>>= Deserialize::deserialize(val).ok();
if let Some(a) = raw_authors.as_ref() {
if a.contains(&empty_string) {
return Err(serde::de::Error::invalid_type(
Unexpected::Other("prefix matches must not be empty strings"),
&"a json object"));
}
}
rf.authors = raw_authors;
} else if key.starts_with('#') && key.len() > 1 && val.is_array() {
if let Some(tag_search) = tag_search_char_from_filter(key) {
if ts.is_none() {
@@ -90,7 +141,7 @@ impl<'de> Deserialize<'de> for ReqFilter {
if let Some(m) = ts.as_mut() {
let tag_vals: Option<Vec<String>> = Deserialize::deserialize(val).ok();
if let Some(v) = tag_vals {
let hs = HashSet::from_iter(v.into_iter());
let hs = v.into_iter().collect::<HashSet<_>>();
m.insert(tag_search.to_owned(), hs);
}
};
@@ -171,6 +222,7 @@ impl<'de> Deserialize<'de> for Subscription {
// create indexes
filters.push(f);
}
filters.dedup();
Ok(Subscription {
id: sub_id.to_owned(),
filters,
@@ -180,13 +232,20 @@ impl<'de> Deserialize<'de> for Subscription {

impl Subscription {
/// Get a copy of the subscription identifier.
pub fn get_id(&self) -> String {
#[must_use] pub fn get_id(&self) -> String {
self.id.clone()
}

/// Determine if any filter is requesting historical (database)
/// queries. If every filter has limit:0, we do not need to query the DB.
#[must_use] pub fn needs_historical_events(&self) -> bool {
self.filters.iter().any(|f| f.limit!=Some(0))
}

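For example, a request like ["REQ","live",{"limit":0}] registers for realtime matching only: every filter carries limit 0, so needs_historical_events() returns false and no database query is issued.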
/// Determine if this subscription matches a given [`Event`]. Any
/// individual filter match is sufficient.
pub fn interested_in_event(&self, event: &Event) -> bool {
for f in self.filters.iter() {
#[must_use] pub fn interested_in_event(&self, event: &Event) -> bool {
for f in &self.filters {
if f.interested_in_event(event) {
return true;
}
@@ -209,23 +268,20 @@ impl ReqFilter {
fn ids_match(&self, event: &Event) -> bool {
self.ids
.as_ref()
.map(|vs| prefix_match(vs, &event.id))
.unwrap_or(true)
.map_or(true, |vs| prefix_match(vs, &event.id))
}

fn authors_match(&self, event: &Event) -> bool {
self.authors
.as_ref()
.map(|vs| prefix_match(vs, &event.pubkey))
.unwrap_or(true)
.map_or(true, |vs| prefix_match(vs, &event.pubkey))
}

fn delegated_authors_match(&self, event: &Event) -> bool {
if let Some(delegated_pubkey) = &event.delegated_by {
self.authors
.as_ref()
.map(|vs| prefix_match(vs, delegated_pubkey))
.unwrap_or(true)
.map_or(true, |vs| prefix_match(vs, delegated_pubkey))
} else {
false
}
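The map_or(true, ...) refactor above preserves the existing semantics: a filter field that is absent matches every event. A self-contained sketch of the idiom (names illustrative):

fn field_matches(field: Option<&Vec<String>>, value: &str) -> bool {
    // None means the field was omitted from the filter, so anything matches
    field.map_or(true, |vs| vs.iter().any(|v| v == value))
}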
@@ -251,16 +307,15 @@ impl ReqFilter {
fn kind_match(&self, kind: u64) -> bool {
self.kinds
.as_ref()
.map(|ks| ks.contains(&kind))
.unwrap_or(true)
.map_or(true, |ks| ks.contains(&kind))
}

/// Determine if all populated fields in this filter match the provided event.
pub fn interested_in_event(&self, event: &Event) -> bool {
#[must_use] pub fn interested_in_event(&self, event: &Event) -> bool {
// self.id.as_ref().map(|v| v == &event.id).unwrap_or(true)
self.ids_match(event)
&& self.since.map(|t| event.created_at > t).unwrap_or(true)
&& self.until.map(|t| event.created_at < t).unwrap_or(true)
&& self.since.map_or(true, |t| event.created_at > t)
&& self.until.map_or(true, |t| event.created_at < t)
&& self.kind_match(event.kind)
&& (self.authors_match(event) || self.delegated_authors_match(event))
&& self.tag_match(event)
@@ -294,6 +349,24 @@ mod tests {
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}

#[test]
fn req_empty_authors_prefix() {
let raw_json = "[\"REQ\",\"some-id\",{\"authors\": [\"\"]}]";
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}

#[test]
fn req_empty_ids_prefix() {
let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\"]}]";
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}

#[test]
fn req_empty_ids_prefix_mixed() {
let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\",\"aaa\"]}]";
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}

#[test]
fn legacy_filter() {
// legacy field in filter
@@ -301,6 +374,23 @@ mod tests {
assert!(serde_json::from_str::<Subscription>(raw_json).is_ok());
}

#[test]
fn dupe_filter() -> Result<()> {
let raw_json = r#"["REQ","some-id",{"kinds": [1984]}, {"kinds": [1984]}]"#;
let s: Subscription = serde_json::from_str(raw_json)?;
assert_eq!(s.filters.len(), 1);
Ok(())
}

#[test]
fn dupe_filter_many() -> Result<()> {
// duplicate filters in different order
let raw_json = r#"["REQ","some-id",{"kinds":[1984]},{"kinds":[1984]},{"kinds":[1984]},{"kinds":[1984]}]"#;
let s: Subscription = serde_json::from_str(raw_json)?;
assert_eq!(s.filters.len(), 1);
Ok(())
}

#[test]
fn author_filter() -> Result<()> {
let raw_json = r#"["REQ","some-id",{"authors": ["test-author-id"]}]"#;
@@ -532,4 +622,22 @@ mod tests {
assert!(!s.interested_in_event(&e));
Ok(())
}

#[test]
fn serialize_filter() -> Result<()> {
let s: Subscription = serde_json::from_str(r##"["REQ","xyz",{"authors":["abc", "bcd"], "since": 10, "until": 20, "limit":100, "#e": ["foo", "bar"], "#d": ["test"]}]"##)?;
let f = s.filters.get(0);
let serialized = serde_json::to_string(&f)?;
let serialized_wrapped = format!(r##"["REQ", "xyz",{}]"##, serialized);
let parsed: Subscription = serde_json::from_str(&serialized_wrapped)?;
let parsed_filter = parsed.filters.get(0);
if let Some(pf) = parsed_filter {
assert_eq!(pf.since, Some(10));
assert_eq!(pf.until, Some(20));
assert_eq!(pf.limit, Some(100));
} else {
assert!(false, "filter could not be parsed");
}
Ok(())
}
}

src/utils.rs
@@ -1,8 +1,10 @@
//! Common utility functions
use bech32::FromBase32;
use std::time::SystemTime;
use url::Url;

/// Seconds since 1970.
pub fn unix_time() -> u64 {
#[must_use] pub fn unix_time() -> u64 {
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.map(|x| x.as_secs())
@@ -10,17 +12,32 @@ pub fn unix_time() -> u64 {
}

/// Check if a string contains only hex characters.
pub fn is_hex(s: &str) -> bool {
#[must_use] pub fn is_hex(s: &str) -> bool {
s.chars().all(|x| char::is_ascii_hexdigit(&x))
}

/// Check if string is a nip19 string
pub fn is_nip19(s: &str) -> bool {
s.starts_with("npub") || s.starts_with("note")
}

pub fn nip19_to_hex(s: &str) -> Result<String, bech32::Error> {
let (_hrp, data, _checksum) = bech32::decode(s)?;
let data = Vec::<u8>::from_base32(&data)?;
Ok(hex::encode(data))
}

/// Check if a string contains only lower-case hex chars.
pub fn is_lower_hex(s: &str) -> bool {
#[must_use] pub fn is_lower_hex(s: &str) -> bool {
s.chars().all(|x| {
(char::is_ascii_lowercase(&x) || char::is_ascii_digit(&x)) && char::is_ascii_hexdigit(&x)
})
}

pub fn host_str(url: &String) -> Option<String> {
Url::parse(url).ok().and_then(|u| u.host_str().map(|s| s.to_string()))
}

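A quick usage sketch for the two new helpers (the npub value is the one used in the tests below):

assert_eq!(host_str(&"wss://nostr.example.com/".to_string()),
           Some("nostr.example.com".to_string()));
let hex = nip19_to_hex("npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6").unwrap();
assert!(is_lower_hex(&hex));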
#[cfg(test)]
mod tests {
use super::*;
@@ -30,4 +47,21 @@ mod tests {
let hexstr = "abcd0123";
assert_eq!(is_lower_hex(hexstr), true);
}

#[test]
fn nip19() {
let hexkey = "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d";
let nip19key = "npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6";
assert_eq!(is_nip19(hexkey), false);
assert_eq!(is_nip19(nip19key), true);
}

#[test]
fn nip19_hex() {
let nip19key = "npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6";
let expected = "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d";
let got = nip19_to_hex(nip19key).unwrap();

assert_eq!(expected, got);
}
}

tests/cli.rs (new file)
@@ -0,0 +1,10 @@
#[cfg(test)]
mod tests {
use nostr_rs_relay::cli::CLIArgs;

#[test]
fn cli_tests() {
use clap::CommandFactory;
CLIArgs::command().debug_assert();
}
}
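clap's CommandFactory::command().debug_assert() builds the derived Command and panics if the definition is internally inconsistent (for example, conflicting argument names), so this single test is enough to validate the CLI structure at test time.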
@@ -36,9 +36,9 @@ pub fn start_relay() -> Result<Relay> {
settings.database.min_conn = 4;
settings.database.max_conn = 8;
let (shutdown_tx, shutdown_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
let handle = thread::spawn(|| {
let handle = thread::spawn(move || {
// server will block the thread it is run on.
let _ = start_server(settings, shutdown_rx);
let _ = start_server(&settings, shutdown_rx);
});
// how do we know the relay has finished starting up?
Ok(Relay {

tests/conn.rs (new file)
@@ -0,0 +1,356 @@
#[cfg(test)]
mod tests {
use bitcoin_hashes::hex::ToHex;
use bitcoin_hashes::sha256;
use bitcoin_hashes::Hash;
use secp256k1::rand;
use secp256k1::{KeyPair, Secp256k1, XOnlyPublicKey};

use nostr_rs_relay::conn::ClientConn;
use nostr_rs_relay::error::Error;
use nostr_rs_relay::event::Event;
use nostr_rs_relay::utils::unix_time;

const RELAY: &str = "wss://nostr.example.com/";

#[test]
fn test_generate_auth_challenge() {
let mut client_conn = ClientConn::new("127.0.0.1".into());

assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

client_conn.generate_auth_challenge();

assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

let last_auth_challenge = client_conn.auth_challenge().cloned();

client_conn.generate_auth_challenge();

assert_ne!(client_conn.auth_challenge(), None);
assert_ne!(
client_conn.auth_challenge().unwrap(),
&last_auth_challenge.unwrap()
);
assert_eq!(client_conn.auth_pubkey(), None);
}

#[test]
fn test_authenticate_with_valid_event() {
let mut client_conn = ClientConn::new("127.0.0.1".into());

assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

client_conn.generate_auth_challenge();

assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

let challenge = client_conn.auth_challenge().unwrap();
let event = auth_event(challenge);

let result = client_conn.authenticate(&event, &RELAY.into());

assert!(matches!(result, Ok(())));
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), Some(&event.pubkey));
}

#[test]
fn test_fail_to_authenticate_in_invalid_state() {
let mut client_conn = ClientConn::new("127.0.0.1".into());

assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

let event = auth_event(&"challenge".into());
let result = client_conn.authenticate(&event, &RELAY.into());

assert!(matches!(result, Err(Error::AuthFailure)));
}

#[test]
fn test_authenticate_when_already_authenticated() {
let mut client_conn = ClientConn::new("127.0.0.1".into());

assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

client_conn.generate_auth_challenge();

assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

let challenge = client_conn.auth_challenge().unwrap().clone();

let event = auth_event(&challenge);
let result = client_conn.authenticate(&event, &RELAY.into());

assert!(matches!(result, Ok(())));
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), Some(&event.pubkey));

let event1 = auth_event(&challenge);
let result1 = client_conn.authenticate(&event1, &RELAY.into());

assert!(matches!(result1, Ok(())));
assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), Some(&event.pubkey));
assert_ne!(client_conn.auth_pubkey(), Some(&event1.pubkey));
}

#[test]
fn test_fail_to_authenticate_with_invalid_event() {
let mut client_conn = ClientConn::new("127.0.0.1".into());

assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

client_conn.generate_auth_challenge();

assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

let challenge = client_conn.auth_challenge().unwrap();
let mut event = auth_event(challenge);
event.sig = event.sig.chars().rev().collect::<String>();

let result = client_conn.authenticate(&event, &RELAY.into());

assert!(matches!(result, Err(Error::AuthFailure)));
}

#[test]
fn test_fail_to_authenticate_with_invalid_event_kind() {
let mut client_conn = ClientConn::new("127.0.0.1".into());

assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

client_conn.generate_auth_challenge();

assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

let challenge = client_conn.auth_challenge().unwrap();
let event = auth_event_with_kind(challenge, 9999999999999999);

let result = client_conn.authenticate(&event, &RELAY.into());

assert!(matches!(result, Err(Error::AuthFailure)));
}

#[test]
fn test_fail_to_authenticate_with_expired_timestamp() {
let mut client_conn = ClientConn::new("127.0.0.1".into());

assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

client_conn.generate_auth_challenge();

assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

let challenge = client_conn.auth_challenge().unwrap();
let event = auth_event_with_created_at(challenge, unix_time() - 1200); // 20 minutes

let result = client_conn.authenticate(&event, &RELAY.into());

assert!(matches!(result, Err(Error::AuthFailure)));
}

#[test]
fn test_fail_to_authenticate_with_future_timestamp() {
let mut client_conn = ClientConn::new("127.0.0.1".into());

assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

client_conn.generate_auth_challenge();

assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

let challenge = client_conn.auth_challenge().unwrap();
let event = auth_event_with_created_at(challenge, unix_time() + 1200); // 20 minutes

let result = client_conn.authenticate(&event, &RELAY.into());

assert!(matches!(result, Err(Error::AuthFailure)));
}

#[test]
fn test_fail_to_authenticate_without_tags() {
let mut client_conn = ClientConn::new("127.0.0.1".into());

assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

client_conn.generate_auth_challenge();

assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

let event = auth_event_without_tags();

let result = client_conn.authenticate(&event, &RELAY.into());

assert!(matches!(result, Err(Error::AuthFailure)));
}

#[test]
fn test_fail_to_authenticate_without_challenge() {
let mut client_conn = ClientConn::new("127.0.0.1".into());

assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

client_conn.generate_auth_challenge();

assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

let event = auth_event_without_challenge();

let result = client_conn.authenticate(&event, &RELAY.into());

assert!(matches!(result, Err(Error::AuthFailure)));
}

#[test]
fn test_fail_to_authenticate_without_relay() {
let mut client_conn = ClientConn::new("127.0.0.1".into());

assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

client_conn.generate_auth_challenge();

assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

let challenge = client_conn.auth_challenge().unwrap();
let event = auth_event_without_relay(challenge);

let result = client_conn.authenticate(&event, &RELAY.into());

assert!(matches!(result, Err(Error::AuthFailure)));
}

#[test]
fn test_fail_to_authenticate_with_invalid_challenge() {
let mut client_conn = ClientConn::new("127.0.0.1".into());

assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

client_conn.generate_auth_challenge();

assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

let event = auth_event(&"invalid challenge".into());

let result = client_conn.authenticate(&event, &RELAY.into());

assert!(matches!(result, Err(Error::AuthFailure)));
}

#[test]
fn test_fail_to_authenticate_with_invalid_relay() {
let mut client_conn = ClientConn::new("127.0.0.1".into());

assert_eq!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

client_conn.generate_auth_challenge();

assert_ne!(client_conn.auth_challenge(), None);
assert_eq!(client_conn.auth_pubkey(), None);

let challenge = client_conn.auth_challenge().unwrap();
let event = auth_event_with_relay(challenge, &"xyz".into());

let result = client_conn.authenticate(&event, &RELAY.into());

assert!(matches!(result, Err(Error::AuthFailure)));
}

fn auth_event(challenge: &String) -> Event {
create_auth_event(Some(challenge), Some(&RELAY.into()), 22242, unix_time())
}

fn auth_event_with_kind(challenge: &String, kind: u64) -> Event {
create_auth_event(Some(challenge), Some(&RELAY.into()), kind, unix_time())
}

fn auth_event_with_created_at(challenge: &String, created_at: u64) -> Event {
create_auth_event(Some(challenge), Some(&RELAY.into()), 22242, created_at)
}

fn auth_event_without_challenge() -> Event {
create_auth_event(None, Some(&RELAY.into()), 22242, unix_time())
}

fn auth_event_without_relay(challenge: &String) -> Event {
create_auth_event(Some(challenge), None, 22242, unix_time())
}

fn auth_event_without_tags() -> Event {
create_auth_event(None, None, 22242, unix_time())
}

fn auth_event_with_relay(challenge: &String, relay: &String) -> Event {
create_auth_event(Some(challenge), Some(relay), 22242, unix_time())
}

fn create_auth_event(
challenge: Option<&String>,
relay: Option<&String>,
kind: u64,
created_at: u64,
) -> Event {
let secp = Secp256k1::new();
let key_pair = KeyPair::new(&secp, &mut rand::thread_rng());
let public_key = XOnlyPublicKey::from_keypair(&key_pair);

let mut tags: Vec<Vec<String>> = vec![];

if let Some(c) = challenge {
let tag = vec!["challenge".into(), c.into()];
tags.push(tag);
}

if let Some(r) = relay {
let tag = vec!["relay".into(), r.into()];
tags.push(tag);
}

let mut event = Event {
id: "0".to_owned(),
pubkey: public_key.to_hex(),
delegated_by: None,
created_at: created_at,
kind: kind,
tags: tags,
content: "".to_owned(),
sig: "0".to_owned(),
tagidx: None,
};

let c = event.to_canonical().unwrap();
let digest: sha256::Hash = sha256::Hash::hash(c.as_bytes());

let msg = secp256k1::Message::from_slice(digest.as_ref()).unwrap();
let sig = secp.sign_schnorr(&msg, &key_pair);

event.id = format!("{digest:x}");
event.sig = sig.to_hex();

event
}
}