mirror of https://github.com/scsibug/nostr-rs-relay.git
synced 2025-09-01 03:40:46 -04:00

Compare commits: 103 commits

f3a42712a6, 27361d064a, 3bafb611e5, b960ab70de, 15e2f097aa, 185f9e7abb, f44dae6ac9, abc356c17d,
81f8256c37, b3db2bd081, d31e974d56, 36eaf9fea5, a16c4e698a, e63d179424, 28b7b83a6e, 2e42b1b86e,
bd07a11f50, bc4b45d4b8, 1ca5d652de, d7cceab8fc, 2805a96e5b, ac14a0759f, cdd4e5949f, 5999009779,
e36c791c53, d95adbcb3d, 509736c56d, 8004ea9b44, 866c239cc9, 6012b57e95, 559541b160, facaed7805,
ba4fcd072a, 2b79099cfe, eb1d2d717d, e5e03d4378, c377b136aa, bca5614a82, f7550b4c61, 1623bacd0d,
2bbde8ad09, a42004c30c, 9dd97908cf, ab749e9cf0, 1820e9c689, 2d3a35fe30, 9c77b06f79, c8e8b71b91,
6d57adef73, 111eb4a10c, 214f152c5d, 3fcaf97a15, cec501b37f, 2557c7f69c, 3979a94726, 71bdbfb425,
b6798f96b6, c1152ce430, 6f1a4e7d76, 1804bee912, 34db91940c, 0859e535ed, bdd4e43df4, dfa6985f44,
57e1b53c13, 53f83aa923, 34a8f99d61, c8f7420334, e2869e8fad, 5c07b2eca5, 25752abe6b, 16f6e974c8,
744d467a28, b094fbcabd, 4121c872bc, 6489e685ab, 6800c2e39d, e996d4c009, 2331c881d7, 585fdd3884,
cf3e67500f, 1d19442cfd, 13cc24b5cd, f543957b34, 7021f102e8, fddbf321bc, 3e7f2e21df, 9d9c6c78d1,
703b2efe6e, 0db6487ce3, ba987d3212, 73f4f60cc7, d06d227ebe, 3519488c4e, fbd3315110, 3d3d1bde53,
ed336111bb, 8aed572989, 62e8da689d, 807d1aa384, 66a55b55b9, 76c77c3e56, 50daab8a6f
@@ -7,6 +7,7 @@ environment:
packages:
- cargo
- sqlite-devel
- protobuf-compiler
sources:
- https://git.sr.ht/~gheartsfield/nostr-rs-relay/
shell: false
39  .github/workflows/ci.yml  vendored  Normal file
@@ -0,0 +1,39 @@
name: Test and build

on:
  push:
    branches:
      - master

jobs:
  test_nostr-rs-relay:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Update local toolchain
        run: |
          sudo apt-get install -y protobuf-compiler
          rustup update
          rustup component add clippy
          rustup install nightly

      - name: Toolchain info
        run: |
          cargo --version --verbose
          rustc --version
          cargo clippy --version

      # - name: Lint
      #   run: |
      #     cargo fmt -- --check
      #     cargo clippy -- -D warnings

      - name: Test
        run: |
          cargo check
          cargo test --all

      - name: Build
        run: |
          cargo build --release --locked
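The same checks the workflow runs can be reproduced locally before pushing. A minimal sketch for a Debian/Ubuntu host, using only commands that appear in the workflow above:

```console
sudo apt-get install -y protobuf-compiler
rustup update
cargo check
cargo test --all
cargo build --release --locked
```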
1359  Cargo.lock  generated
File diff suppressed because it is too large
15  Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "nostr-rs-relay"
version = "0.7.16"
version = "0.8.3"
edition = "2021"
authors = ["Greg Heartsfield <scsibug@imap.cc>"]
description = "A relay implementation for the Nostr protocol"
@@ -12,9 +12,12 @@ keywords = ["nostr", "server"]
categories = ["network-programming", "web-programming"]

[dependencies]
clap = { version = "4.0.32", features = ["env", "default", "derive"]}
tracing = "0.1.36"
tracing-subscriber = "0.2.0"
tokio = { version = "1", features = ["full", "tracing", "signal"] }
prost = "0.11"
tonic = "0.8.3"
console-subscriber = "0.1.8"
futures = "0.3"
futures-util = "0.3"
@@ -41,6 +44,16 @@ parse_duration = "2"
rand = "0.8"
const_format = "0.2.28"
regex = "1"
async-trait = "0.1.60"
async-std = "1.12.0"
sqlx = { version ="0.6.2", features=["runtime-tokio-rustls", "postgres", "chrono"]}
chrono = "0.4.23"
prometheus = "0.13.3"
indicatif = "0.17.3"
bech32 = "0.9.1"

[dev-dependencies]
anyhow = "1"

[build-dependencies]
tonic-build = { version="0.8.3", features = ["prost"] }
@@ -1,5 +1,7 @@
FROM docker.io/library/rust:1.66.0 as builder
FROM docker.io/library/rust:1-bookworm as builder
RUN apt-get update \
    && apt-get install -y cmake protobuf-compiler \
    && rm -rf /var/lib/apt/lists/*
RUN USER=root cargo install cargo-auditable
RUN USER=root cargo new --bin nostr-rs-relay
WORKDIR ./nostr-rs-relay
@@ -12,6 +14,8 @@ RUN rm src/*.rs

# copy project source code
COPY ./src ./src
COPY ./proto ./proto
COPY ./build.rs ./build.rs

# build auditable release using locked deps
RUN rm ./target/release/deps/nostr*relay*
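The hunks above change the container build recipe (a multi-stage Rust image build). Assuming this is the repository's Dockerfile, the change can be smoke-tested with an ordinary local image build; the tag is only an example:

```console
docker build -t nostr-rs-relay .
```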
12  README.md
@@ -2,7 +2,8 @@

This is a [nostr](https://github.com/nostr-protocol/nostr) relay,
written in Rust. It currently supports the entire relay protocol, and
persists data with SQLite.
persists data with SQLite. There is experimental support for
Postgresql.

The project master repository is available on
[sourcehut](https://sr.ht/~gheartsfield/nostr-rs-relay/), and is
@@ -10,6 +11,9 @@ mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay).

[builds.sr.ht status badge](https://builds.sr.ht/~gheartsfield/nostr-rs-relay/commits/master?)

## Features

[NIPs](https://github.com/nostr-protocol/nips) with a relay-specific implementation are listed here.
@@ -30,6 +34,7 @@ mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay).
- [x] NIP-22: [Event `created_at` limits](https://github.com/nostr-protocol/nips/blob/master/22.md) (_future-dated events only_)
- [ ] NIP-26: [Event Delegation](https://github.com/nostr-protocol/nips/blob/master/26.md) (_implemented, but currently disabled_)
- [x] NIP-28: [Public Chat](https://github.com/nostr-protocol/nips/blob/master/28.md)
- [x] NIP-33: [Parameterized Replaceable Events](https://github.com/nostr-protocol/nips/blob/master/33.md)

## Quick Start

@@ -148,3 +153,8 @@ To chat about `nostr-rs-relay` on `nostr` itself; visit our channel on [anigma](
License
---
This project is MIT licensed.

External Documentation and Links
---

* [BlockChainCaffe's Nostr Relay Setup Guide](https://github.com/BlockChainCaffe/Nostr-Relay-Setup-Guide)
4  build.rs  Normal file
@@ -0,0 +1,4 @@
fn main() -> Result<(), Box<dyn std::error::Error>> {
    tonic_build::compile_protos("proto/nauthz.proto")?;
    Ok(())
}
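The new build.rs makes the protobuf compiler a build-time prerequisite (tonic-build invokes protoc); the CI workflow and the container build in this change both install it. A quick local check before building:

```console
protoc --version
cargo build
```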
39  config.toml
@@ -18,16 +18,20 @@ description = "A newly created nostr-rs-relay.\n\nCustomize this with your own i

[diagnostics]
# Enable tokio tracing (for use with tokio-console)
#tracing = true
#tracing = false

[database]
# Database engine (sqlite/postgres). Defaults to sqlite.
# Support for postgres is currently experimental.
#engine = "sqlite"

# Directory for SQLite files. Defaults to the current directory. Can
# also be specified (and overridden) with the "--db dirname" command
# line option.
data_directory = "."
#data_directory = "."

# Use an in-memory database instead of 'nostr.db'.
# Requires sqlite engine.
# Caution; this will not survive a process restart!
#in_memory = false

@@ -40,6 +44,20 @@ data_directory = "."
# to approx the number of cores.
#max_conn = 8

# Database connection string. Required for postgres; not used for
# sqlite.
#connection = "postgresql://postgres:nostr@localhost:7500/nostr"

[grpc]
# gRPC interfaces for externalized decisions and other extensions to
# functionality.
#
# Events can be authorized through an external service, by providing
# the URL below. In the event the server is not accessible, events
# will be permitted. The protobuf3 schema used is available in
# `proto/nauthz.proto`.
# event_authorization_server = "http://[::1]:50051"

[network]
# Bind to this network address
address = "0.0.0.0"
@@ -63,13 +81,18 @@ reject_future_seconds = 1800

[limits]
# Limit events created per second, averaged over one minute. Must be
# an integer. If not set (or set to 0), defaults to unlimited. Note:
# an integer. If not set (or set to 0), there is no limit. Note:
# this is for the server as a whole, not per-connection.
# messages_per_sec = 0
#
# Limiting event creation is highly recommended if your relay is
# public!
#
#messages_per_sec = 5

# Limit client subscriptions created per second, averaged over one
# minute. Must be an integer. If not set (or set to 0), defaults to
# unlimited.
# Limit client subscriptions created, averaged over one minute. Must
# be an integer. If not set (or set to 0), defaults to unlimited.
# Strongly recommended to set this to a low value such as 10 to ensure
# fair service.
#subscriptions_per_min = 0

# UNIMPLEMENTED...
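The commented `connection` string above implies a Postgres server on localhost port 7500 with user `postgres`, password `nostr`, and a database named `nostr`. One way to stand that up for trying the (still experimental) postgres engine is a throwaway container; the image tag and container name below are only examples:

```console
docker run -d --name nostr-pg -p 7500:5432 -e POSTGRES_PASSWORD=nostr postgres:15
# wait a few seconds for the server to accept connections, then:
docker exec nostr-pg createdb -U postgres nostr
```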
@@ -7,7 +7,7 @@ intervention. For heavily trafficked relays, there are a number of
steps that the operator may need to take to maintain performance and
limit disk usage.

This maintenance guide is current as of version `0.7.14`. Future
This maintenance guide is current as of version `0.8.2`. Future
versions may incorporate and automate some of these steps.

## Backing Up the Database

@@ -22,7 +22,7 @@ dated file, and then compress to minimize size:

```console
BACKUP_FILE=/var/backups/nostr/`date +%Y%m%d_%H%M`.db
sqlite3 -readonly /apps/nostr-relay/nostr.db ".backup $BACKUP_FILE
sqlite3 -readonly /apps/nostr-relay/nostr.db ".backup $BACKUP_FILE"
sqlite3 $BACKUP_FILE "vacuum;"
bzip2 -9 $BACKUP_FILE
```
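The guide covers producing the compressed backup but not restoring it; restoring is the mirror image. A minimal sketch, assuming the relay is stopped first and using the same paths as the example above (the dated filename is a placeholder):

```console
bunzip2 -k /var/backups/nostr/20230101_0000.db.bz2
sqlite3 /var/backups/nostr/20230101_0000.db "PRAGMA integrity_check;"
cp /var/backups/nostr/20230101_0000.db /apps/nostr-relay/nostr.db
```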
@@ -43,18 +43,15 @@ vacuum;

## Clearing Hidden Events

When events are deleted, either through deletion events, metadata or
follower updates, or a replaceable event kind, the event is not
actually removed from the database. Instead, a flag `HIDDEN` is set
to true for the event, which excludes it from search results. The
original intent was to ensure that subsequent rebroadcasts of the
event would be easily detected as having been deleted, and would not
need to be stored again. In practice, this decision causes excessive
growth of the `tags` table, since all the previous followers are
retained for those `HIDDEN` events.
When events are deleted, the event is not actually removed from the
database. Instead, a flag `HIDDEN` is set to true for the event,
which excludes it from search results. High volume replacements from
profile or other replaceable events are deleted, not hidden, in the
current version of the relay.

The `event` and especially the `tag` table can be significantly
reduced in size by running these commands:
In the current version, removing hidden events should not result in
significant space savings, but it can still be used if there is no
desire to hold on to events that can never be re-broadcast.

```console
PRAGMA foreign_keys = ON;
```
79  docs/grpc-extensions.md  Normal file
@@ -0,0 +1,79 @@
# gRPC Extensions Design Document

The relay will be extensible through gRPC endpoints, definable in the
main configuration file. These will allow external programs to host
logic for deciding things such as: should this event be persisted,
should this connection be allowed, and should this subscription
request be registered. The primary goal is to allow for
relay-operator-specific functionality that lets operators serve
smaller communities and reduce spam and abuse.

This will likely evolve substantially; the first goal is to get a
basic one-way service that lets an externalized program decide on
event persistence. This does not represent the final state of gRPC
extensibility in `nostr-rs-relay`.

## Considerations

Write event latency must not be significantly affected. However, the
primary reason we are implementing this is spam/abuse protection, so
we are willing to tolerate some increase in latency if that protects
us against outages!

The interface should provide enough information to make simple
decisions, without burdening the relay to do extra queries. The
decision endpoint will be mostly responsible for maintaining state and
gathering additional details.

## Design Overview

A gRPC server may be defined in the `config.toml` file. If it exists,
the relay will attempt to connect to it and send a message for each
`EVENT` command submitted by clients. If a successful response is
returned indicating the event is permitted, the relay continues
processing the event as normal. All existing whitelist, blacklist,
and `NIP-05` validation checks are still performed and MAY still
result in the event being rejected. If a successful response is
returned indicating the decision is anything other than permit, then
the relay MUST reject the event, and return a command result to the
user (using `NIP-20`) indicating the event was blocked (optionally
providing a message).

In the event there is an error in the gRPC interface, event processing
proceeds as if gRPC was disabled (fail open). This allows gRPC
servers to be deployed with minimal chance of causing a full relay
outage.

## Design Details

Currently one procedure call is supported, `EventAdmit`, in the
`Authorization` service. It accepts the following data in order to
support authorization decisions:

- The event itself
- The client IP that submitted the event
- The client's HTTP origin header, if one exists
- The client's HTTP user agent header, if one exists
- The public key of the client, if `NIP-42` authentication was
  performed (not supported in the relay yet!)
- The `NIP-05` associated with the event's public key, if it is known
  to the relay

A server providing authorization decisions will return the following:

- A decision to permit or deny the event
- An optional message that explains why the event was denied, to be
  transmitted to the client

## Security Issues

There is little attempt to secure this interface, since it is intended
for use by processes running on the same host. It is recommended to
ensure that the gRPC server providing the API is not exposed to the
public Internet. Authorization server implementations should have
their own security reviews performed.

A slow gRPC server could cause availability issues for event
processing, since this is performed on a single thread. Avoid any
expensive or long-running processes that could result from submitted
events, since any client can initiate a gRPC call to the service.
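A quick way to exercise this design end to end is to run the example authorization server added under `nauthz_server_example/` in this change (it listens on `[::1]:50051` and permits only kinds 0 through 3), then point the relay at it via the `event_authorization_server` setting shown in `config.toml`:

```console
cd nauthz_server_example
cargo run
```

With `event_authorization_server = "http://[::1]:50051"` set, events of any other kind should be rejected with the "kind ... not permitted" message produced by the example server.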
1010  nauthz_server_example/Cargo.lock  generated  Normal file
File diff suppressed because it is too large

13  nauthz_server_example/Cargo.toml  Normal file
@@ -0,0 +1,13 @@
[package]
name = "nauthz-server"
version = "0.1.0"
edition = "2021"

[dependencies]
# Common dependencies
tokio = { version = "1.0", features = ["rt-multi-thread", "macros"] }
prost = "0.11"
tonic = "0.8.3"

[build-dependencies]
tonic-build = { version="0.8.3", features = ["prost"] }

4  nauthz_server_example/build.rs  Normal file
@@ -0,0 +1,4 @@
fn main() -> Result<(), Box<dyn std::error::Error>> {
    tonic_build::compile_protos("../proto/nauthz.proto")?;
    Ok(())
}
61  nauthz_server_example/src/main.rs  Normal file
@@ -0,0 +1,61 @@
use tonic::{transport::Server, Request, Response, Status};

use nauthz_grpc::authorization_server::{Authorization, AuthorizationServer};
use nauthz_grpc::{EventReply, EventRequest, Decision};

pub mod nauthz_grpc {
    tonic::include_proto!("nauthz");
}

#[derive(Default)]
pub struct EventAuthz {
    allowed_kinds: Vec<u64>,
}

#[tonic::async_trait]
impl Authorization for EventAuthz {
    async fn event_admit(
        &self,
        request: Request<EventRequest>,
    ) -> Result<Response<EventReply>, Status> {
        let reply;
        let req = request.into_inner();
        let event = req.event.unwrap();
        let content_prefix: String = event.content.chars().take(40).collect();
        println!("recvd event, [kind={}, origin={:?}, nip05_domain={:?}, tag_count={}, content_sample={:?}]",
                 event.kind, req.origin, req.nip05.map(|x| x.domain), event.tags.len(), content_prefix);
        // Permit any event with a whitelisted kind
        if self.allowed_kinds.contains(&event.kind) {
            println!("This looks fine! (kind={})", event.kind);
            reply = nauthz_grpc::EventReply {
                decision: Decision::Permit as i32,
                message: None,
            };
        } else {
            println!("Blocked! (kind={})", event.kind);
            reply = nauthz_grpc::EventReply {
                decision: Decision::Deny as i32,
                message: Some(format!("kind {} not permitted", event.kind)),
            };
        }
        Ok(Response::new(reply))
    }
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let addr = "[::1]:50051".parse().unwrap();

    // A simple authorization engine that allows kinds 0-3
    let checker = EventAuthz {
        allowed_kinds: vec![0, 1, 2, 3],
    };
    println!("EventAuthz Server listening on {}", addr);
    // Start serving
    Server::builder()
        .add_service(AuthorizationServer::new(checker))
        .serve(addr)
        .await?;
    Ok(())
}
60  proto/nauthz.proto  Normal file
@@ -0,0 +1,60 @@
syntax = "proto3";

// Nostr Authorization Services
package nauthz;

// Authorization for actions against a relay
service Authorization {
  // Determine if an event should be admitted to the relay
  rpc EventAdmit(EventRequest) returns (EventReply) {}
}

message Event {
  bytes id = 1;            // 32-byte SHA256 hash of serialized event
  bytes pubkey = 2;        // 32-byte public key of event creator
  fixed64 created_at = 3;  // UNIX timestamp provided by event creator
  uint64 kind = 4;         // event kind
  string content = 5;      // arbitrary event contents
  repeated TagEntry tags = 6; // event tag array
  bytes sig = 7;           // 32-byte signature of the event id
  // Individual values for a single tag
  message TagEntry {
    repeated string values = 1;
  }
}

// Event data and metadata for authorization decisions
message EventRequest {
  Event event = 1;                // the event to be admitted for further relay processing
  optional string ip_addr = 2;    // IP address of the client that submitted the event
  optional string origin = 3;     // HTTP origin header from the client, if one exists
  optional string user_agent = 4; // HTTP user-agent header from the client, if one exists
  optional bytes auth_pubkey = 5; // the public key associated with a NIP-42 AUTH'd session, if authentication occurred
  optional Nip05Name nip05 = 6;   // NIP-05 address associated with the event pubkey, if it is known and has been validated by the relay
  // A NIP-05 verification record
  message Nip05Name {
    string local = 1;
    string domain = 2;
  }
}

// A permit or deny decision
enum Decision {
  DECISION_UNSPECIFIED = 0;
  DECISION_PERMIT = 1; // Admit this event for further processing
  DECISION_DENY = 2;   // Deny persisting or propagating this event
}

// Response to an event authorization request
message EventReply {
  Decision decision = 1;       // decision to enforce
  optional string message = 2; // informative message for the client
}
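The service can also be exercised from the command line without writing a client. A sketch using grpcurl (not part of this change): the request body is a made-up minimal event, and the `-proto` flag is used because the example server is not assumed to expose gRPC reflection:

```console
grpcurl -plaintext -import-path proto -proto nauthz.proto \
  -d '{"event": {"kind": 1, "content": "hello"}}' \
  '[::1]:50051' nauthz.Authorization/EventAdmit
```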
@@ -68,13 +68,32 @@ http {
server_name relay.example.com;
ssl_certificate /etc/letsencrypt/live/relay.example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/relay.example.com/privkey.pem;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_protocols TLSv1.3 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ecdh_curve secp521r1:secp384r1;
ssl_ciphers EECDH+AESGCM:EECDH+AES256;

# Optional Diffie-Hellman parameters
# Generate with openssl dhparam -out /etc/ssl/certs/dhparam.pem 4096
#ssl_dhparam /etc/ssl/certs/dhparam.pem;

ssl_session_cache shared:TLS:2m;
ssl_buffer_size 4k;

# OCSP stapling
ssl_stapling on;
ssl_stapling_verify on;
resolver 1.1.1.1 1.0.0.1 [2606:4700:4700::1111] [2606:4700:4700::1001]; # Cloudflare

# Set HSTS to 365 days
add_header Strict-Transport-Security 'max-age=31536000; includeSubDomains; preload' always;
keepalive_timeout 70;

location / {
    proxy_pass http://localhost:8080;
    proxy_http_version 1.1;
    proxy_read_timeout 1d;
    proxy_send_timeout 1d;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "Upgrade";
    proxy_set_header Host $host;
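Once the proxy above is in place, one simple smoke test is to request the relay's NIP-11 information document through it (NIP-11 responses are served when the `application/nostr+json` Accept header is sent; the hostname is the placeholder from the nginx example):

```console
curl -H 'Accept: application/nostr+json' https://relay.example.com/
```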
175
src/bin/bulkloader.rs
Normal file
175
src/bin/bulkloader.rs
Normal file
@@ -0,0 +1,175 @@
|
||||
use std::io;
|
||||
use std::path::Path;
|
||||
use nostr_rs_relay::utils::is_lower_hex;
|
||||
use tracing::info;
|
||||
use nostr_rs_relay::config;
|
||||
use nostr_rs_relay::event::{Event,single_char_tagname};
|
||||
use nostr_rs_relay::error::{Error, Result};
|
||||
use nostr_rs_relay::repo::sqlite::{PooledConnection, build_pool};
|
||||
use nostr_rs_relay::repo::sqlite_migration::{curr_db_version, DB_VERSION};
|
||||
use rusqlite::{OpenFlags, Transaction};
|
||||
use std::sync::mpsc;
|
||||
use std::thread;
|
||||
use rusqlite::params;
|
||||
|
||||
/// Bulk load JSONL data from STDIN to the database specified in config.toml (or ./nostr.db as a default).
|
||||
/// The database must already exist, this will not create a new one.
|
||||
/// Tested against schema v13.
|
||||
|
||||
pub fn main() -> Result<()> {
|
||||
let _trace_sub = tracing_subscriber::fmt::try_init();
|
||||
println!("Nostr-rs-relay Bulk Loader");
|
||||
// check for a database file, or create one.
|
||||
let settings = config::Settings::new(&None);
|
||||
if !Path::new(&settings.database.data_directory).is_dir() {
|
||||
info!("Database directory does not exist");
|
||||
return Err(Error::DatabaseDirError);
|
||||
}
|
||||
// Get a database pool
|
||||
let pool = build_pool("bulk-loader", &settings, OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE, 1,4,false);
|
||||
{
|
||||
// check for database schema version
|
||||
let mut conn: PooledConnection = pool.get()?;
|
||||
let version = curr_db_version(&mut conn)?;
|
||||
info!("current version is: {:?}", version);
|
||||
// ensure the schema version is current.
|
||||
if version != DB_VERSION {
|
||||
info!("version is not current, exiting");
|
||||
panic!("cannot write to schema other than v{DB_VERSION}");
|
||||
}
|
||||
}
|
||||
// this channel will contain parsed events ready to be inserted
|
||||
let (event_tx, event_rx) = mpsc::sync_channel(100_000);
|
||||
// Thread for reading events
|
||||
let _stdin_reader_handler = thread::spawn(move || {
|
||||
let stdin = io::stdin();
|
||||
for readline in stdin.lines() {
|
||||
if let Ok(line) = readline {
|
||||
// try to parse a nostr event
|
||||
let eres: Result<Event, serde_json::Error> = serde_json::from_str(&line);
|
||||
if let Ok(mut e) = eres {
|
||||
if let Ok(()) = e.validate() {
|
||||
e.build_index();
|
||||
//debug!("Event: {:?}", e);
|
||||
event_tx.send(Some(e)).ok();
|
||||
} else {
|
||||
info!("could not validate event");
|
||||
}
|
||||
} else {
|
||||
info!("error reading event: {:?}", eres);
|
||||
}
|
||||
} else {
|
||||
// error reading
|
||||
info!("error reading: {:?}", readline);
|
||||
}
|
||||
}
|
||||
info!("finished parsing events");
|
||||
event_tx.send(None).ok();
|
||||
let ok: Result<()> = Ok(());
|
||||
ok
|
||||
});
|
||||
let mut conn: PooledConnection = pool.get()?;
|
||||
let mut events_read = 0;
|
||||
let event_batch_size =50_000;
|
||||
let mut new_events = 0;
|
||||
let mut has_more_events = true;
|
||||
while has_more_events {
|
||||
// begin a transaction
|
||||
let tx = conn.transaction()?;
|
||||
// read in batch_size events and commit
|
||||
for _ in 0..event_batch_size {
|
||||
match event_rx.recv() {
|
||||
Ok(Some(e)) => {
|
||||
events_read += 1;
|
||||
// ignore ephemeral events
|
||||
if !(e.kind >= 20000 && e.kind < 30000) {
|
||||
match write_event(&tx, e) {
|
||||
Ok(c) => {
|
||||
new_events += c;
|
||||
},
|
||||
Err(e) => {
|
||||
info!("error inserting event: {:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
Ok(None) => {
|
||||
// signal that the sender will never produce more
|
||||
// events
|
||||
has_more_events=false;
|
||||
break;
|
||||
},
|
||||
Err(_) => {
|
||||
info!("sender is closed");
|
||||
// sender is done
|
||||
}
|
||||
}
|
||||
}
|
||||
info!("committed {} events...", new_events);
|
||||
tx.commit()?;
|
||||
conn.execute_batch("pragma wal_checkpoint(truncate)")?;
|
||||
|
||||
}
|
||||
info!("processed {} events", events_read);
|
||||
info!("stored {} new events", new_events);
|
||||
// get a connection for writing events
|
||||
// read standard in.
|
||||
info!("finished reading input");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Write an event and update the tag table.
|
||||
/// Assumes the event has its index built.
|
||||
fn write_event(tx: &Transaction, e: Event) -> Result<usize> {
|
||||
let id_blob = hex::decode(&e.id).ok();
|
||||
let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok();
|
||||
let delegator_blob: Option<Vec<u8>> = e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
|
||||
let event_str = serde_json::to_string(&e).ok();
|
||||
// ignore if the event hash is a duplicate.
|
||||
let ins_count = tx.execute(
|
||||
"INSERT OR IGNORE INTO event (event_hash, created_at, kind, author, delegated_by, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, ?6, strftime('%s','now'), FALSE);",
|
||||
params![id_blob, e.created_at, e.kind, pubkey_blob, delegator_blob, event_str]
|
||||
)?;
|
||||
if ins_count == 0 {
|
||||
return Ok(0);
|
||||
}
|
||||
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
|
||||
let event_id = tx.last_insert_rowid();
|
||||
// look at each event, and each tag, creating new tag entries if appropriate.
|
||||
for t in e.tags.iter().filter(|x| x.len() > 1) {
|
||||
let tagname = t.get(0).unwrap();
|
||||
let tagnamechar_opt = single_char_tagname(tagname);
|
||||
if tagnamechar_opt.is_none() {
|
||||
continue;
|
||||
}
|
||||
// safe because len was > 1
|
||||
let tagval = t.get(1).unwrap();
|
||||
// insert as BLOB if we can restore it losslessly.
|
||||
// this means it needs to be even length and lowercase.
|
||||
if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
|
||||
tx.execute(
|
||||
"INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
|
||||
params![event_id, tagname, hex::decode(tagval).ok()],
|
||||
)?;
|
||||
} else {
|
||||
// otherwise, insert as text
|
||||
tx.execute(
|
||||
"INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
|
||||
params![event_id, tagname, &tagval],
|
||||
)?;
|
||||
}
|
||||
}
|
||||
if e.is_replaceable() {
|
||||
//let query = "SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1;";
|
||||
//let count: usize = tx.query_row(query, params![e.kind, pubkey_blob], |row| row.get(0))?;
|
||||
//info!("found {} rows that /would/ be preserved", count);
|
||||
match tx.execute(
|
||||
"DELETE FROM event WHERE kind=? and author=? and id NOT IN (SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1);",
|
||||
params![e.kind, pubkey_blob, e.kind, pubkey_blob],
|
||||
) {
|
||||
Ok(_) => {},
|
||||
Err(x) => {info!("error deleting replaceable event: {:?}",x);}
|
||||
}
|
||||
}
|
||||
Ok(ins_count)
|
||||
}
|
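Per its doc comment, the bulk loader above reads JSONL events from standard input and writes them into the existing database named by `config.toml`. A minimal usage sketch, with `events.jsonl` standing in for an export of events:

```console
cargo build --release
./target/release/bulkloader < events.jsonl
```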
20  src/cli.rs  Normal file
@@ -0,0 +1,20 @@
use clap::Parser;

#[derive(Parser)]
#[command(about = "A nostr relay written in Rust", author = env!("CARGO_PKG_AUTHORS"), version = env!("CARGO_PKG_VERSION"))]
pub struct CLIArgs {
    #[arg(
        short,
        long,
        help = "Use the <directory> as the location of the database",
        required = false,
    )]
    pub db: Option<String>,
    #[arg(
        short,
        long,
        help = "Use the <file name> as the location of the config file",
        required = false,
    )]
    pub config: Option<String>,
}
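With clap deriving the interface from the struct above, the relay accepts `--db`/`-d` and `--config`/`-c` (replacing the hand-rolled `--db` parsing removed from src/main.rs later in this diff). An example invocation; both paths are placeholders:

```console
./target/release/nostr-rs-relay --db /var/lib/nostr-rs-relay --config /etc/nostr-rs-relay/config.toml
```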
@@ -18,9 +18,17 @@ pub struct Info {
|
||||
#[allow(unused)]
|
||||
pub struct Database {
|
||||
pub data_directory: String,
|
||||
pub engine: String,
|
||||
pub in_memory: bool,
|
||||
pub min_conn: u32,
|
||||
pub max_conn: u32,
|
||||
pub connection: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[allow(unused)]
|
||||
pub struct Grpc {
|
||||
pub event_admission_server: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
@@ -143,6 +151,7 @@ pub struct Settings {
|
||||
pub info: Info,
|
||||
pub diagnostics: Diagnostics,
|
||||
pub database: Database,
|
||||
pub grpc: Grpc,
|
||||
pub network: Network,
|
||||
pub limits: Limits,
|
||||
pub authorization: Authorization,
|
||||
@@ -153,10 +162,10 @@ pub struct Settings {
|
||||
|
||||
impl Settings {
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
pub fn new(config_file_name: &Option<String>) -> Self {
|
||||
let default_settings = Self::default();
|
||||
// attempt to construct settings with file
|
||||
let from_file = Self::new_from_default(&default_settings);
|
||||
let from_file = Self::new_from_default(&default_settings, config_file_name);
|
||||
match from_file {
|
||||
Ok(f) => f,
|
||||
Err(e) => {
|
||||
@@ -166,13 +175,19 @@ impl Settings {
|
||||
}
|
||||
}
|
||||
|
||||
fn new_from_default(default: &Settings) -> Result<Self, ConfigError> {
|
||||
|
||||
fn new_from_default(default: &Settings, config_file_name: &Option<String>) -> Result<Self, ConfigError> {
|
||||
let default_config_file_name = "config.toml".to_string();
|
||||
let config: &String = match config_file_name {
|
||||
Some(value) => value,
|
||||
None => &default_config_file_name
|
||||
};
|
||||
let builder = Config::builder();
|
||||
let config: Config = builder
|
||||
// use defaults
|
||||
// use defaults
|
||||
.add_source(Config::try_from(default)?)
|
||||
// override with file contents
|
||||
.add_source(File::with_name("config.toml"))
|
||||
// override with file contents
|
||||
.add_source(File::with_name(config))
|
||||
.build()?;
|
||||
let mut settings: Settings = config.try_deserialize()?;
|
||||
// ensure connection pool size is logical
|
||||
@@ -206,9 +221,14 @@ impl Default for Settings {
|
||||
diagnostics: Diagnostics { tracing: false },
|
||||
database: Database {
|
||||
data_directory: ".".to_owned(),
|
||||
engine: "sqlite".to_owned(),
|
||||
in_memory: false,
|
||||
min_conn: 4,
|
||||
max_conn: 8,
|
||||
connection: "".to_owned(),
|
||||
},
|
||||
grpc: Grpc {
|
||||
event_admission_server: None,
|
||||
},
|
||||
network: Network {
|
||||
port: 8080,
|
||||
|
12
src/conn.rs
12
src/conn.rs
@@ -14,7 +14,7 @@ const MAX_SUBSCRIPTION_ID_LEN: usize = 256;
|
||||
/// State for a client connection
|
||||
pub struct ClientConn {
|
||||
/// Client IP (either from socket, or configured proxy header
|
||||
client_ip: String,
|
||||
client_ip_addr: String,
|
||||
/// Unique client identifier generated at connection time
|
||||
client_id: Uuid,
|
||||
/// The current set of active client subscriptions
|
||||
@@ -32,22 +32,22 @@ impl Default for ClientConn {
|
||||
impl ClientConn {
|
||||
/// Create a new, empty connection state.
|
||||
#[must_use]
|
||||
pub fn new(client_ip: String) -> Self {
|
||||
pub fn new(client_ip_addr: String) -> Self {
|
||||
let client_id = Uuid::new_v4();
|
||||
ClientConn {
|
||||
client_ip,
|
||||
client_ip_addr,
|
||||
client_id,
|
||||
subscriptions: HashMap::new(),
|
||||
max_subs: 32,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn subscriptions(&self) -> &HashMap<String, Subscription> {
|
||||
#[must_use] pub fn subscriptions(&self) -> &HashMap<String, Subscription> {
|
||||
&self.subscriptions
|
||||
}
|
||||
|
||||
/// Check if the given subscription already exists
|
||||
pub fn has_subscription(&self, sub: &Subscription) -> bool {
|
||||
#[must_use] pub fn has_subscription(&self, sub: &Subscription) -> bool {
|
||||
self.subscriptions.values().any(|x| x == sub)
|
||||
}
|
||||
|
||||
@@ -60,7 +60,7 @@ impl ClientConn {
|
||||
|
||||
#[must_use]
|
||||
pub fn ip(&self) -> &str {
|
||||
&self.client_ip
|
||||
&self.client_ip_addr
|
||||
}
|
||||
|
||||
/// Add a new subscription for this connection.
|
||||
|
@@ -84,7 +84,7 @@ pub struct ConditionQuery {
|
||||
}
|
||||
|
||||
impl ConditionQuery {
|
||||
pub fn allows_event(&self, event: &Event) -> bool {
|
||||
#[must_use] pub fn allows_event(&self, event: &Event) -> bool {
|
||||
// check each condition, to ensure that the event complies
|
||||
// with the restriction.
|
||||
for c in &self.conditions {
|
||||
@@ -101,14 +101,14 @@ impl ConditionQuery {
|
||||
}
|
||||
|
||||
// Verify that the delegator approved the delegation; return a ConditionQuery if so.
|
||||
pub fn validate_delegation(
|
||||
#[must_use] pub fn validate_delegation(
|
||||
delegator: &str,
|
||||
delegatee: &str,
|
||||
cond_query: &str,
|
||||
sigstr: &str,
|
||||
) -> Option<ConditionQuery> {
|
||||
// form the token
|
||||
let tok = format!("nostr:delegation:{}:{}", delegatee, cond_query);
|
||||
let tok = format!("nostr:delegation:{delegatee}:{cond_query}");
|
||||
// form SHA256 hash
|
||||
let digest: sha256::Hash = sha256::Hash::hash(tok.as_bytes());
|
||||
let sig = schnorr::Signature::from_str(sigstr).unwrap();
|
||||
@@ -133,8 +133,8 @@ pub fn validate_delegation(
|
||||
}
|
||||
|
||||
/// Parsed delegation condition
|
||||
/// see https://github.com/nostr-protocol/nips/pull/28#pullrequestreview-1084903800
|
||||
/// An example complex condition would be: kind=1,2,3&created_at<1665265999
|
||||
/// see <https://github.com/nostr-protocol/nips/pull/28#pullrequestreview-1084903800>
|
||||
/// An example complex condition would be: `kind=1,2,3&created_at<1665265999`
|
||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
|
||||
pub struct Condition {
|
||||
pub field: Field,
|
||||
@@ -144,7 +144,7 @@ pub struct Condition {
|
||||
|
||||
impl Condition {
|
||||
/// Check if this condition allows the given event to be delegated
|
||||
pub fn allows_event(&self, event: &Event) -> bool {
|
||||
#[must_use] pub fn allows_event(&self, event: &Event) -> bool {
|
||||
// determine what the right-hand side of the operator is
|
||||
let resolved_field = match &self.field {
|
||||
Field::Kind => event.kind,
|
||||
@@ -323,7 +323,7 @@ mod tests {
|
||||
Condition {
|
||||
field: Field::CreatedAt,
|
||||
operator: Operator::LessThan,
|
||||
values: vec![1665867123],
|
||||
values: vec![1_665_867_123],
|
||||
},
|
||||
],
|
||||
};
|
||||
|
23  src/error.rs
@@ -48,6 +48,10 @@ pub enum Error {
DatabaseDirError,
#[error("Database Connection Pool Error")]
DatabasePoolError(r2d2::Error),
#[error("SQL error")]
SqlxError(sqlx::Error),
#[error("Database Connection Pool Error")]
SqlxDatabasePoolError(sqlx::Error),
#[error("Custom Error : {0}")]
CustomError(String),
#[error("Task join error")]
@@ -58,6 +62,12 @@ pub enum Error {
HexError(hex::FromHexError),
#[error("Delegation parse error")]
DelegationParseError,
#[error("Channel closed error")]
ChannelClosed,
#[error("Authz error")]
AuthzError,
#[error("Tonic GRPC error")]
TonicError(tonic::Status),
#[error("Unknown/Undocumented")]
UnknownError,
}
@@ -100,6 +110,12 @@ impl From<rusqlite::Error> for Error {
    }
}

impl From<sqlx::Error> for Error {
    fn from(d: sqlx::Error) -> Self {
        Error::SqlxDatabasePoolError(d)
    }
}

impl From<serde_json::Error> for Error {
    /// Wrap JSON error
    fn from(r: serde_json::Error) -> Self {
@@ -120,3 +136,10 @@ impl From<config::ConfigError> for Error {
        Error::ConfigError(r)
    }
}

impl From<tonic::Status> for Error {
    /// Wrap Tonic status error
    fn from(r: tonic::Status) -> Self {
        Error::TonicError(r)
    }
}
215
src/event.rs
215
src/event.rs
@@ -1,6 +1,6 @@
|
||||
//! Event parsing and validation
|
||||
use crate::delegation::validate_delegation;
|
||||
use crate::error::Error::*;
|
||||
use crate::error::Error::{CommandUnknownError, EventCouldNotCanonicalize, EventInvalidId, EventInvalidSignature, EventMalformedPubkey};
|
||||
use crate::error::Result;
|
||||
use crate::nip05;
|
||||
use crate::utils::unix_time;
|
||||
@@ -28,7 +28,7 @@ pub struct EventCmd {
|
||||
}
|
||||
|
||||
impl EventCmd {
|
||||
pub fn event_id(&self) -> &str {
|
||||
#[must_use] pub fn event_id(&self) -> &str {
|
||||
&self.event.id
|
||||
}
|
||||
}
|
||||
@@ -65,7 +65,7 @@ where
|
||||
}
|
||||
|
||||
/// Attempt to form a single-char tag name.
|
||||
pub fn single_char_tagname(tagname: &str) -> Option<char> {
|
||||
#[must_use] pub fn single_char_tagname(tagname: &str) -> Option<char> {
|
||||
// We return the tag character if and only if the tagname consists
|
||||
// of a single char.
|
||||
let mut tagnamechars = tagname.chars();
|
||||
@@ -87,22 +87,22 @@ pub fn single_char_tagname(tagname: &str) -> Option<char> {
|
||||
impl From<EventCmd> for Result<Event> {
|
||||
fn from(ec: EventCmd) -> Result<Event> {
|
||||
// ensure command is correct
|
||||
if ec.cmd != "EVENT" {
|
||||
Err(CommandUnknownError)
|
||||
} else {
|
||||
if ec.cmd == "EVENT" {
|
||||
ec.event.validate().map(|_| {
|
||||
let mut e = ec.event;
|
||||
e.build_index();
|
||||
e.update_delegation();
|
||||
e
|
||||
})
|
||||
} else {
|
||||
Err(CommandUnknownError)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Event {
|
||||
#[cfg(test)]
|
||||
pub fn simple_event() -> Event {
|
||||
#[must_use] pub fn simple_event() -> Event {
|
||||
Event {
|
||||
id: "0".to_owned(),
|
||||
pubkey: "0".to_owned(),
|
||||
@@ -116,12 +116,49 @@ impl Event {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_kind_metadata(&self) -> bool {
|
||||
#[must_use] pub fn is_kind_metadata(&self) -> bool {
|
||||
self.kind == 0
|
||||
}
|
||||
|
||||
/// Should this event be persisted?
|
||||
#[must_use] pub fn is_ephemeral(&self) -> bool {
|
||||
self.kind >= 20000 && self.kind < 30000
|
||||
}
|
||||
|
||||
/// Should this event be replaced with newer timestamps from same author?
|
||||
#[must_use] pub fn is_replaceable(&self) -> bool {
|
||||
self.kind == 0 || self.kind == 3 || self.kind == 41 || (self.kind >= 10000 && self.kind < 20000)
|
||||
}
|
||||
|
||||
/// Should this event be replaced with newer timestamps from same author, for distinct `d` tag values?
|
||||
#[must_use] pub fn is_param_replaceable(&self) -> bool {
|
||||
self.kind >= 30000 && self.kind < 40000
|
||||
}
|
||||
|
||||
/// What is the replaceable `d` tag value?
|
||||
|
||||
/// Should this event be replaced with newer timestamps from same author, for distinct `d` tag values?
|
||||
#[must_use] pub fn distinct_param(&self) -> Option<String> {
|
||||
if self.is_param_replaceable() {
|
||||
let default = "".to_string();
|
||||
let dvals:Vec<&String> = self.tags
|
||||
.iter()
|
||||
.filter(|x| !x.is_empty())
|
||||
.filter(|x| x.get(0).unwrap() == "d")
|
||||
.map(|x| x.get(1).unwrap_or(&default)).take(1)
|
||||
.collect();
|
||||
let dval_first = dvals.get(0);
|
||||
match dval_first {
|
||||
Some(_) => {dval_first.map(|x| x.to_string())},
|
||||
None => Some(default)
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Pull a NIP-05 Name out of the event, if one exists
|
||||
pub fn get_nip05_addr(&self) -> Option<nip05::Nip05Name> {
|
||||
#[must_use] pub fn get_nip05_addr(&self) -> Option<nip05::Nip05Name> {
|
||||
if self.is_kind_metadata() {
|
||||
// very quick check if we should attempt to parse this json
|
||||
if self.content.contains("\"nip05\"") {
|
||||
@@ -138,7 +175,7 @@ impl Event {
|
||||
// is this event delegated (properly)?
|
||||
// does the signature match, and are conditions valid?
|
||||
// if so, return an alternate author for the event
|
||||
pub fn delegated_author(&self) -> Option<String> {
|
||||
#[must_use] pub fn delegated_author(&self) -> Option<String> {
|
||||
// is there a delegation tag?
|
||||
let delegation_tag: Vec<String> = self
|
||||
.tags
|
||||
@@ -146,8 +183,7 @@ impl Event {
|
||||
.filter(|x| x.len() == 4)
|
||||
.filter(|x| x.get(0).unwrap() == "delegation")
|
||||
.take(1)
|
||||
.next()?
|
||||
.to_vec(); // get first tag
|
||||
.next()?.clone(); // get first tag
|
||||
|
||||
//let delegation_tag = self.tag_values_by_name("delegation");
|
||||
// delegation tags should have exactly 3 elements after the name (pubkey, condition, sig)
|
||||
@@ -176,11 +212,11 @@ impl Event {
|
||||
}
|
||||
|
||||
/// Update delegation status
|
||||
fn update_delegation(&mut self) {
|
||||
pub fn update_delegation(&mut self) {
|
||||
self.delegated_by = self.delegated_author();
|
||||
}
|
||||
/// Build an event tag index
|
||||
fn build_index(&mut self) {
|
||||
pub fn build_index(&mut self) {
|
||||
// if there are no tags; just leave the index as None
|
||||
if self.tags.is_empty() {
|
||||
return;
|
||||
@@ -207,24 +243,24 @@ impl Event {
|
||||
}
|
||||
|
||||
/// Create a short event identifier, suitable for logging.
|
||||
pub fn get_event_id_prefix(&self) -> String {
|
||||
#[must_use] pub fn get_event_id_prefix(&self) -> String {
|
||||
self.id.chars().take(8).collect()
|
||||
}
|
||||
pub fn get_author_prefix(&self) -> String {
|
||||
#[must_use] pub fn get_author_prefix(&self) -> String {
|
||||
self.pubkey.chars().take(8).collect()
|
||||
}
|
||||
|
||||
/// Retrieve tag initial values across all tags matching the name
|
||||
pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
|
||||
#[must_use] pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
|
||||
self.tags
|
||||
.iter()
|
||||
.filter(|x| x.len() > 1)
|
||||
.filter(|x| x.get(0).unwrap() == tag_name)
|
||||
.map(|x| x.get(1).unwrap().to_owned())
|
||||
.map(|x| x.get(1).unwrap().clone())
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn is_valid_timestamp(&self, reject_future_seconds: Option<usize>) -> bool {
|
||||
#[must_use] pub fn is_valid_timestamp(&self, reject_future_seconds: Option<usize>) -> bool {
|
||||
if let Some(allowable_future) = reject_future_seconds {
|
||||
let curr_time = unix_time();
|
||||
// calculate difference, plus how far future we allow
|
||||
@@ -256,7 +292,7 @@ impl Event {
|
||||
let c = c_opt.unwrap();
|
||||
// * compute the sha256sum.
|
||||
let digest: sha256::Hash = sha256::Hash::hash(c.as_bytes());
|
||||
let hex_digest = format!("{:x}", digest);
|
||||
let hex_digest = format!("{digest:x}");
|
||||
// * ensure the id matches the computed sha256sum.
|
||||
if self.id != hex_digest {
|
||||
debug!("event id does not match digest");
|
||||
@@ -286,7 +322,7 @@ impl Event {
|
||||
let id = Number::from(0_u64);
|
||||
c.push(serde_json::Value::Number(id));
|
||||
// public key
|
||||
c.push(Value::String(self.pubkey.to_owned()));
|
||||
c.push(Value::String(self.pubkey.clone()));
|
||||
// creation time
|
||||
let created_at = Number::from(self.created_at);
|
||||
c.push(serde_json::Value::Number(created_at));
|
||||
@@ -296,7 +332,7 @@ impl Event {
|
||||
// tags
|
||||
c.push(self.tags_to_canonical());
|
||||
// content
|
||||
c.push(Value::String(self.content.to_owned()));
|
||||
c.push(Value::String(self.content.clone()));
|
||||
serde_json::to_string(&Value::Array(c)).ok()
|
||||
}
|
||||
|
||||
@@ -304,11 +340,11 @@ impl Event {
|
||||
fn tags_to_canonical(&self) -> Value {
|
||||
let mut tags = Vec::<Value>::new();
|
||||
// iterate over self tags,
|
||||
for t in self.tags.iter() {
|
||||
for t in &self.tags {
|
||||
// each tag is a vec of strings
|
||||
let mut a = Vec::<Value>::new();
|
||||
for v in t.iter() {
|
||||
a.push(serde_json::Value::String(v.to_owned()));
|
||||
a.push(serde_json::Value::String(v.clone()));
|
||||
}
|
||||
tags.push(serde_json::Value::Array(a));
|
||||
}
|
||||
@@ -316,7 +352,7 @@ impl Event {
|
||||
}
|
||||
|
||||
/// Determine if the given tag and value set intersect with tags in this event.
|
||||
pub fn generic_tag_val_intersect(&self, tagname: char, check: &HashSet<String>) -> bool {
|
||||
#[must_use] pub fn generic_tag_val_intersect(&self, tagname: char, check: &HashSet<String>) -> bool {
|
||||
match &self.tagidx {
|
||||
// check if this is indexable tagname
|
||||
Some(idx) => match idx.get(&tagname) {
|
||||
@@ -355,7 +391,7 @@ mod tests {
|
||||
fn empty_event_tag_match() {
|
||||
let event = Event::simple_event();
|
||||
assert!(!event
|
||||
.generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
|
||||
.generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -408,7 +444,7 @@ mod tests {
|
||||
id: "999".to_owned(),
|
||||
pubkey: "012345".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 501234,
|
||||
created_at: 501_234,
|
||||
kind: 1,
|
||||
tags: vec![],
|
||||
content: "this is a test".to_owned(),
|
||||
@@ -426,7 +462,7 @@ mod tests {
|
||||
id: "999".to_owned(),
|
||||
pubkey: "012345".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 501234,
|
||||
created_at: 501_234,
|
||||
kind: 1,
|
||||
tags: vec![
|
||||
vec!["j".to_owned(), "abc".to_owned()],
|
||||
@@ -453,7 +489,7 @@ mod tests {
|
||||
id: "999".to_owned(),
|
||||
pubkey: "012345".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 501234,
|
||||
created_at: 501_234,
|
||||
kind: 1,
|
||||
tags: vec![
|
||||
vec!["j".to_owned(), "abc".to_owned()],
|
||||
@@ -480,7 +516,7 @@ mod tests {
|
||||
id: "999".to_owned(),
|
||||
pubkey: "012345".to_owned(),
|
||||
delegated_by: None,
|
||||
created_at: 501234,
|
||||
created_at: 501_234,
|
||||
kind: 1,
|
||||
tags: vec![
|
||||
vec!["#e".to_owned(), "aoeu".to_owned()],
|
||||
@@ -499,4 +535,123 @@ mod tests {
|
||||
let expected = Some(expected_json.to_owned());
|
||||
assert_eq!(c, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ephemeral_event() {
|
||||
let mut event = Event::simple_event();
|
||||
event.kind=20000;
|
||||
assert!(event.is_ephemeral());
|
||||
event.kind=29999;
|
||||
assert!(event.is_ephemeral());
|
||||
event.kind=30000;
|
||||
assert!(!event.is_ephemeral());
|
||||
event.kind=19999;
|
||||
assert!(!event.is_ephemeral());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn replaceable_event() {
|
||||
let mut event = Event::simple_event();
|
||||
event.kind=0;
|
||||
assert!(event.is_replaceable());
|
||||
event.kind=3;
|
||||
assert!(event.is_replaceable());
|
||||
event.kind=10000;
|
||||
assert!(event.is_replaceable());
|
||||
event.kind=19999;
|
||||
assert!(event.is_replaceable());
|
||||
event.kind=20000;
|
||||
assert!(!event.is_replaceable());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn param_replaceable_event() {
|
||||
let mut event = Event::simple_event();
|
||||
event.kind = 30000;
|
||||
assert!(event.is_param_replaceable());
|
||||
event.kind = 39999;
|
||||
assert!(event.is_param_replaceable());
|
||||
event.kind = 29999;
|
||||
assert!(!event.is_param_replaceable());
|
||||
event.kind = 40000;
|
||||
assert!(!event.is_param_replaceable());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn param_replaceable_value_case_1() {
|
||||
// NIP case #1: "tags":[["d",""]]
|
||||
let mut event = Event::simple_event();
|
||||
event.kind = 30000;
|
||||
event.tags = vec![
|
||||
vec!["d".to_owned(), "".to_owned()]];
|
||||
assert_eq!(event.distinct_param(), Some("".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn param_replaceable_value_case_2() {
|
||||
// NIP case #2: "tags":[]: implicit d tag with empty value
|
||||
let mut event = Event::simple_event();
|
||||
event.kind = 30000;
|
||||
assert_eq!(event.distinct_param(), Some("".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn param_replaceable_value_case_3() {
|
||||
// NIP case #3: "tags":[["d"]]: implicit empty value ""
|
||||
let mut event = Event::simple_event();
|
||||
event.kind = 30000;
|
||||
event.tags = vec![
|
||||
vec!["d".to_owned()]];
|
||||
assert_eq!(event.distinct_param(), Some("".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn param_replaceable_value_case_4() {
|
||||
// NIP case #4: "tags":[["d",""],["d","not empty"]]: only first d tag is considered
|
||||
let mut event = Event::simple_event();
|
||||
event.kind = 30000;
|
||||
event.tags = vec![
|
||||
vec!["d".to_owned(), "".to_string()],
|
||||
vec!["d".to_owned(), "not empty".to_string()]
|
||||
];
|
||||
assert_eq!(event.distinct_param(), Some("".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn param_replaceable_value_case_4b() {
|
||||
// Variation of #4 with
|
||||
// NIP case #4: "tags":[["d","not empty"],["d",""]]: only first d tag is considered
|
||||
let mut event = Event::simple_event();
|
||||
event.kind = 30000;
|
||||
event.tags = vec![
|
||||
vec!["d".to_owned(), "not empty".to_string()],
|
||||
vec!["d".to_owned(), "".to_string()]
|
||||
];
|
||||
assert_eq!(event.distinct_param(), Some("not empty".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn param_replaceable_value_case_5() {
|
||||
// NIP case #5: "tags":[["d"],["d","some value"]]: only first d tag is considered
|
||||
let mut event = Event::simple_event();
|
||||
event.kind = 30000;
|
||||
event.tags = vec![
|
||||
vec!["d".to_owned()],
|
||||
vec!["d".to_owned(), "second value".to_string()],
|
||||
vec!["d".to_owned(), "third value".to_string()]
|
||||
];
|
||||
assert_eq!(event.distinct_param(), Some("".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn param_replaceable_value_case_6() {
|
||||
// NIP case #6: "tags":[["e"]]: same as no tags
|
||||
let mut event = Event::simple_event();
|
||||
event.kind = 30000;
|
||||
event.tags = vec![
|
||||
vec!["e".to_owned()],
|
||||
];
|
||||
assert_eq!(event.distinct_param(), Some("".to_string()));
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -1,5 +1,5 @@
//! Utilities for searching hexadecimal
use crate::utils::is_hex;
use crate::utils::{is_hex};
use hex;

/// Types of hexadecimal queries.
@@ -19,16 +19,15 @@ fn is_all_fs(s: &str) -> bool {
}

/// Find the next hex sequence greater than the argument.
pub fn hex_range(s: &str) -> Option<HexSearch> {
    // handle special cases
    if !is_hex(s) || s.len() > 64 {
#[must_use] pub fn hex_range(s: &str) -> Option<HexSearch> {
    let mut hash_base = s.to_owned();
    if !is_hex(&hash_base) || hash_base.len() > 64 {
        return None;
    }
    if s.len() == 64 {
        return Some(HexSearch::Exact(hex::decode(s).ok()?));
    if hash_base.len() == 64 {
        return Some(HexSearch::Exact(hex::decode(&hash_base).ok()?));
    }
    // if s is odd, add a zero
    let mut hash_base = s.to_owned();
    let mut odd = hash_base.len() % 2 != 0;
    if odd {
        // extend the string to make it even
@@ -57,8 +56,9 @@ pub fn hex_range(s: &str) -> Option<HexSearch> {
    } else if odd {
        // check if first char in this byte is NOT 'f'
        if b < 240 {
            upper[byte_len] = b + 16; // bump up the first character in this byte
            // increment done, stop iterating through the vec
            // bump up the first character in this byte
            upper[byte_len] = b + 16;
            // increment done, stop iterating through the vec
            break;
        }
        // if it is 'f', reset the byte to 0 and do a carry
|
||||
|
@@ -35,9 +35,9 @@ impl From<config::Info> for RelayInfo {
description: i.description,
pubkey: i.pubkey,
contact: i.contact,
supported_nips: Some(vec![1, 2, 9, 11, 12, 15, 16, 20, 22]),
supported_nips: Some(vec![1, 2, 9, 11, 12, 15, 16, 20, 22, 33]),
software: Some("https://git.sr.ht/~gheartsfield/nostr-rs-relay".to_owned()),
version: CARGO_PKG_VERSION.map(|x| x.to_owned()),
version: CARGO_PKG_VERSION.map(std::borrow::ToOwned::to_owned),
}
}
}
|
||||
|
@@ -1,3 +1,4 @@
pub mod cli;
pub mod close;
pub mod config;
pub mod conn;
@@ -8,8 +9,9 @@ pub mod event;
pub mod hexrange;
pub mod info;
pub mod nip05;
pub mod nauthz;
pub mod notice;
pub mod schema;
pub mod repo;
pub mod subscription;
pub mod utils;
// Public API for creating relays programmatically
|
||||
|
84
src/main.rs
84
src/main.rs
@@ -1,77 +1,51 @@
|
||||
//! Server process
|
||||
|
||||
use clap::Parser;
|
||||
use nostr_rs_relay::cli::CLIArgs;
|
||||
use nostr_rs_relay::config;
|
||||
use nostr_rs_relay::server::start_server;
|
||||
use std::env;
|
||||
use std::sync::mpsc as syncmpsc;
|
||||
use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
|
||||
use std::thread;
|
||||
use tracing::info;
|
||||
|
||||
use console_subscriber::ConsoleLayer;
|
||||
|
||||
/// Return a requested DB name from command line arguments.
|
||||
fn db_from_args(args: &[String]) -> Option<String> {
|
||||
if args.len() == 3 && args.get(1) == Some(&"--db".to_owned()) {
|
||||
return args.get(2).map(std::clone::Clone::clone);
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn print_version() {
|
||||
println!("{} v{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION"));
|
||||
}
|
||||
|
||||
fn print_help() {
|
||||
println!("Usage: nostr-rs-relay [OPTION]...\n");
|
||||
println!("Options:");
|
||||
println!(" --help Show this help message and exit");
|
||||
println!(" --version Show version information and exit");
|
||||
println!(" --db <directory> Use the <directory> as the location of the database");
|
||||
}
|
||||
|
||||
/// Start running a Nostr relay server.
|
||||
fn main() {
|
||||
let args = CLIArgs::parse();
|
||||
|
||||
// get config file name from args
|
||||
let config_file_arg = args.config;
|
||||
|
||||
// configure settings from the config file (defaults to config.toml)
|
||||
// replace default settings with those read from the config file
|
||||
let mut settings = config::Settings::new(&config_file_arg);
|
||||
|
||||
// setup tracing
|
||||
let _trace_sub = tracing_subscriber::fmt::try_init();
|
||||
info!("Starting up from main");
|
||||
// get database directory from args
|
||||
let args: Vec<String> = env::args().collect();
|
||||
|
||||
let help_flag: bool = args.contains(&"--help".to_owned());
|
||||
// if --help flag was passed, display help and exit
|
||||
if help_flag {
|
||||
print_help();
|
||||
return;
|
||||
}
|
||||
|
||||
let version_flag: bool = args.contains(&"--version".to_owned());
|
||||
// if --version flag was passed, display version and exit
|
||||
if version_flag {
|
||||
print_version();
|
||||
return;
|
||||
}
|
||||
|
||||
let db_dir: Option<String> = db_from_args(&args);
|
||||
// configure settings from config.toml
|
||||
// replace default settings with those read from config.toml
|
||||
let mut settings = config::Settings::new();
|
||||
|
||||
if settings.diagnostics.tracing {
|
||||
// enable tracing with tokio-console
|
||||
ConsoleLayer::builder().with_default_env().init();
|
||||
} else {
|
||||
// standard logging
|
||||
tracing_subscriber::fmt::try_init().unwrap();
|
||||
}
|
||||
// update with database location
|
||||
if let Some(db) = db_dir {
|
||||
settings.database.data_directory = db;
|
||||
}
|
||||
info!("Starting up from main");
|
||||
|
||||
// get database directory from args
|
||||
let db_dir_arg = args.db;
|
||||
|
||||
// update with database location from args, if provided
|
||||
if let Some(db_dir) = db_dir_arg {
|
||||
settings.database.data_directory = db_dir;
|
||||
}
|
||||
// we should have a 'control plane' channel to monitor and bump
|
||||
// the server. this will let us do stuff like clear the database,
|
||||
// shutdown, etc.; for now all this does is initiate shutdown if
|
||||
// `()` is sent. This will change in the future, this is just a
|
||||
// stopgap to shutdown the relay when it is used as a library.
|
||||
let (_, ctrl_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
|
||||
// run this in a new thread
|
||||
let handle = thread::spawn(|| {
|
||||
// we should have a 'control plane' channel to monitor and bump the server.
|
||||
// this will let us do stuff like clear the database, shutdown, etc.
|
||||
let _svr = start_server(settings, ctrl_rx);
|
||||
let handle = thread::spawn(move || {
|
||||
let _svr = start_server(&settings, ctrl_rx);
|
||||
});
|
||||
// block on nostr thread to finish.
|
||||
handle.join().unwrap();
|
||||
110 src/nauthz.rs Normal file
@@ -0,0 +1,110 @@
|
||||
use crate::error::{Error, Result};
|
||||
use crate::{event::Event, nip05::Nip05Name};
|
||||
use nauthz_grpc::authorization_client::AuthorizationClient;
|
||||
use nauthz_grpc::event::TagEntry;
|
||||
use nauthz_grpc::{Decision, Event as GrpcEvent, EventReply, EventRequest};
|
||||
use tracing::{info, warn};
|
||||
|
||||
pub mod nauthz_grpc {
|
||||
tonic::include_proto!("nauthz");
|
||||
}
|
||||
|
||||
// A decision for the DB to act upon
|
||||
pub trait AuthzDecision: Send + Sync {
|
||||
fn permitted(&self) -> bool;
|
||||
fn message(&self) -> Option<String>;
|
||||
}
|
||||
|
||||
impl AuthzDecision for EventReply {
|
||||
fn permitted(&self) -> bool {
|
||||
self.decision == Decision::Permit as i32
|
||||
}
|
||||
fn message(&self) -> Option<String> {
|
||||
self.message.clone()
|
||||
}
|
||||
}
|
||||
|
||||
// A connection to an event admission GRPC server
|
||||
pub struct EventAuthzService {
|
||||
server_addr: String,
|
||||
conn: Option<AuthorizationClient<tonic::transport::Channel>>,
|
||||
}
|
||||
|
||||
// conversion of Nip05Names into GRPC type
|
||||
impl std::convert::From<Nip05Name> for nauthz_grpc::event_request::Nip05Name {
|
||||
fn from(value: Nip05Name) -> Self {
|
||||
nauthz_grpc::event_request::Nip05Name {
|
||||
local: value.local.clone(),
|
||||
domain: value.domain.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// conversion of event tags into grpc struct
|
||||
fn tags_to_protobuf(tags: &Vec<Vec<String>>) -> Vec<TagEntry> {
|
||||
tags.iter()
|
||||
.map(|x| TagEntry { values: x.clone() })
|
||||
.collect()
|
||||
}
|
||||
|
||||
impl EventAuthzService {
|
||||
pub async fn connect(server_addr: &str) -> EventAuthzService {
|
||||
let mut eas = EventAuthzService {
|
||||
server_addr: server_addr.to_string(),
|
||||
conn: None,
|
||||
};
|
||||
eas.ready_connection().await;
|
||||
eas
|
||||
}
|
||||
|
||||
pub async fn ready_connection(self: &mut Self) {
|
||||
if self.conn.is_none() {
|
||||
let client = AuthorizationClient::connect(self.server_addr.to_string()).await;
|
||||
if let Err(ref msg) = client {
|
||||
warn!("could not connect to nostr authz GRPC server: {:?}", msg);
|
||||
} else {
|
||||
info!("connected to nostr authorization GRPC server");
|
||||
}
|
||||
self.conn = client.ok();
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn admit_event(
|
||||
self: &mut Self,
|
||||
event: &Event,
|
||||
ip: &str,
|
||||
origin: Option<String>,
|
||||
user_agent: Option<String>,
|
||||
nip05: Option<Nip05Name>,
|
||||
) -> Result<Box<dyn AuthzDecision>> {
|
||||
self.ready_connection().await;
|
||||
let id_blob = hex::decode(&event.id)?;
|
||||
let pubkey_blob = hex::decode(&event.pubkey)?;
|
||||
let sig_blob = hex::decode(&event.sig)?;
|
||||
if let Some(ref mut c) = self.conn {
|
||||
let gevent = GrpcEvent {
|
||||
id: id_blob,
|
||||
pubkey: pubkey_blob,
|
||||
sig: sig_blob,
|
||||
created_at: event.created_at,
|
||||
kind: event.kind,
|
||||
content: event.content.clone(),
|
||||
tags: tags_to_protobuf(&event.tags),
|
||||
};
|
||||
let svr_res = c
|
||||
.event_admit(EventRequest {
|
||||
event: Some(gevent),
|
||||
ip_addr: Some(ip.to_string()),
|
||||
origin,
|
||||
user_agent,
|
||||
auth_pubkey: None,
|
||||
nip05: nip05.map(|x| nauthz_grpc::event_request::Nip05Name::from(x)),
|
||||
})
|
||||
.await?;
|
||||
let reply = svr_res.into_inner();
|
||||
return Ok(Box::new(reply));
|
||||
} else {
|
||||
return Err(Error::AuthzError);
|
||||
}
|
||||
}
|
||||
}
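A minimal usage sketch (not part of the diff) of how a relay might gate ingestion on the returned decision; the function name, the literal IP address, and the give-up behavior are illustrative assumptions, while the types and method signatures come from the file above:

// Illustrative sketch only: `EventAuthzService`, `Event`, and `Result` are the types defined above.
async fn gate_event(authz: &mut EventAuthzService, event: &Event) -> Result<bool> {
    // ask the external gRPC authorization server for a decision
    let decision = authz.admit_event(event, "127.0.0.1", None, None, None).await?;
    if !decision.permitted() {
        // a denial may carry an operator-supplied message for the client
        info!("event denied: {:?}", decision.message());
    }
    Ok(decision.permitted())
}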
406 src/nip05.rs
@@ -5,16 +5,14 @@
|
||||
//! consumes a stream of metadata events, and keeps a database table
|
||||
//! updated with the current NIP-05 verification status.
|
||||
use crate::config::VerifiedUsers;
|
||||
use crate::db;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::event::Event;
|
||||
use crate::utils::unix_time;
|
||||
use crate::repo::NostrRepo;
|
||||
use std::sync::Arc;
|
||||
use hyper::body::HttpBody;
|
||||
use hyper::client::connect::HttpConnector;
|
||||
use hyper::Client;
|
||||
use hyper_tls::HttpsConnector;
|
||||
use rand::Rng;
|
||||
use rusqlite::params;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
use std::time::SystemTime;
|
||||
@@ -23,14 +21,12 @@ use tracing::{debug, info, warn};
|
||||
|
||||
/// NIP-05 verifier state
|
||||
pub struct Verifier {
|
||||
/// Repository for saving/retrieving events and records
|
||||
repo: Arc<dyn NostrRepo>,
|
||||
/// Metadata events for us to inspect
|
||||
metadata_rx: tokio::sync::broadcast::Receiver<Event>,
|
||||
/// Newly validated events get written and then broadcast on this channel to subscribers
|
||||
event_tx: tokio::sync::broadcast::Sender<Event>,
|
||||
/// SQLite read query pool
|
||||
read_pool: db::SqlitePool,
|
||||
/// SQLite write query pool
|
||||
write_pool: db::SqlitePool,
|
||||
/// Settings
|
||||
settings: crate::config::Settings,
|
||||
/// HTTP client
|
||||
@@ -46,13 +42,13 @@ pub struct Verifier {
|
||||
/// A NIP-05 identifier is a local part and domain.
|
||||
#[derive(PartialEq, Eq, Debug, Clone)]
|
||||
pub struct Nip05Name {
|
||||
local: String,
|
||||
domain: String,
|
||||
pub local: String,
|
||||
pub domain: String,
|
||||
}
|
||||
|
||||
impl Nip05Name {
|
||||
/// Does this name represent the entire domain?
|
||||
pub fn is_domain_only(&self) -> bool {
|
||||
#[must_use] pub fn is_domain_only(&self) -> bool {
|
||||
self.local == "_"
|
||||
}
|
||||
|
||||
@@ -62,8 +58,8 @@ impl Nip05Name {
|
||||
"https://{}/.well-known/nostr.json?name={}",
|
||||
self.domain, self.local
|
||||
)
|
||||
.parse::<http::Uri>()
|
||||
.ok()
|
||||
.parse::<http::Uri>()
|
||||
.ok()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -73,16 +69,11 @@ impl std::convert::TryFrom<&str> for Nip05Name {
|
||||
fn try_from(inet: &str) -> Result<Self, Self::Error> {
|
||||
// break full name at the @ boundary.
|
||||
let components: Vec<&str> = inet.split('@').collect();
|
||||
if components.len() != 2 {
|
||||
Err(Error::CustomError("too many/few components".to_owned()))
|
||||
} else {
|
||||
if components.len() == 2 {
|
||||
// check if local name is valid
|
||||
let local = components[0];
|
||||
let domain = components[1];
|
||||
if local
|
||||
.chars()
|
||||
.all(|x| x.is_alphanumeric() || x == '_' || x == '-' || x == '.')
|
||||
{
|
||||
if local.chars().all(|x| x.is_alphanumeric() || x == '_' || x == '-' || x == '.') {
|
||||
if domain
|
||||
.chars()
|
||||
.all(|x| x.is_alphanumeric() || x == '-' || x == '.')
|
||||
@@ -101,6 +92,8 @@ impl std::convert::TryFrom<&str> for Nip05Name {
|
||||
"invalid character in local part".to_owned(),
|
||||
))
|
||||
}
|
||||
} else {
|
||||
Err(Error::CustomError("too many/few components".to_owned()))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -111,55 +104,30 @@ impl std::fmt::Display for Nip05Name {
|
||||
}
|
||||
}
|
||||
|
||||
// Current time, with a slight forward jitter in seconds
|
||||
fn now_jitter(sec: u64) -> u64 {
|
||||
// random time between now, and 10min in future.
|
||||
let mut rng = rand::thread_rng();
|
||||
let jitter_amount = rng.gen_range(0..sec);
|
||||
let now = unix_time();
|
||||
now.saturating_add(jitter_amount)
|
||||
}
|
||||
|
||||
/// Check if the specified username and address are present and match in this response body
|
||||
fn body_contains_user(username: &str, address: &str, bytes: hyper::body::Bytes) -> Result<bool> {
|
||||
fn body_contains_user(username: &str, address: &str, bytes: &hyper::body::Bytes) -> Result<bool> {
|
||||
// convert the body into json
|
||||
let body: serde_json::Value = serde_json::from_slice(&bytes)?;
|
||||
let body: serde_json::Value = serde_json::from_slice(bytes)?;
|
||||
// ensure we have a names object.
|
||||
let names_map = body
|
||||
.as_object()
|
||||
.and_then(|x| x.get("names"))
|
||||
.and_then(|x| x.as_object())
|
||||
.and_then(serde_json::Value::as_object)
|
||||
.ok_or_else(|| Error::CustomError("not a map".to_owned()))?;
|
||||
// get the pubkey for the requested user
|
||||
let check_name = names_map.get(username).and_then(|x| x.as_str());
|
||||
let check_name = names_map.get(username).and_then(serde_json::Value::as_str);
|
||||
// ensure the address is a match
|
||||
Ok(check_name.map(|x| x == address).unwrap_or(false))
|
||||
Ok(check_name.map_or(false, |x| x == address))
|
||||
}
|
||||
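For reference, a test-style sketch (not from the diff) of the document shape this check accepts; the name "bob" and the short placeholder key are hypothetical, real values are 64-character hex pubkeys:

#[test]
fn well_known_shape_sketch() {
    // Illustrative only: "bob" and the placeholder key are made-up values.
    let body = hyper::body::Bytes::from(r#"{"names":{"bob":"deadbeef"}}"#);
    assert!(body_contains_user("bob", "deadbeef", &body).unwrap());
}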
|
||||
impl Verifier {
|
||||
pub fn new(
|
||||
repo: Arc<dyn NostrRepo>,
|
||||
metadata_rx: tokio::sync::broadcast::Receiver<Event>,
|
||||
event_tx: tokio::sync::broadcast::Sender<Event>,
|
||||
settings: crate::config::Settings,
|
||||
) -> Result<Self> {
|
||||
info!("creating NIP-05 verifier");
|
||||
// build a database connection for reading and writing.
|
||||
let write_pool = db::build_pool(
|
||||
"nip05 writer",
|
||||
&settings,
|
||||
rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE,
|
||||
1, // min conns
|
||||
4, // max conns
|
||||
true, // wait for DB
|
||||
);
|
||||
let read_pool = db::build_pool(
|
||||
"nip05 reader",
|
||||
&settings,
|
||||
rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY,
|
||||
1, // min conns
|
||||
8, // max conns
|
||||
true, // wait for DB
|
||||
);
|
||||
// setup hyper client
|
||||
let https = HttpsConnector::new();
|
||||
let client = Client::builder().build::<_, hyper::Body>(https);
|
||||
@@ -175,10 +143,9 @@ impl Verifier {
|
||||
// duration.
|
||||
let reverify_interval = tokio::time::interval(http_wait_duration);
|
||||
Ok(Verifier {
|
||||
repo,
|
||||
metadata_rx,
|
||||
event_tx,
|
||||
read_pool,
|
||||
write_pool,
|
||||
settings,
|
||||
client,
|
||||
wait_after_finish,
|
||||
@@ -246,44 +213,40 @@ impl Verifier {
|
||||
|
||||
let response_fut = self.client.request(req);
|
||||
|
||||
// HTTP request with timeout
|
||||
match tokio::time::timeout(Duration::from_secs(5), response_fut).await {
|
||||
Ok(response_res) => {
|
||||
// limit size of verification document to 1MB.
|
||||
const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024 * 1024;
|
||||
let response = response_res?;
|
||||
// determine content length from response
|
||||
let response_content_length = match response.body().size_hint().upper() {
|
||||
Some(v) => v,
|
||||
None => MAX_ALLOWED_RESPONSE_SIZE + 1, // reject missing content length
|
||||
};
|
||||
// TODO: test how hyper handles the client providing an inaccurate content-length.
|
||||
if response_content_length <= MAX_ALLOWED_RESPONSE_SIZE {
|
||||
let (parts, body) = response.into_parts();
|
||||
// TODO: consider redirects
|
||||
if parts.status == http::StatusCode::OK {
|
||||
// parse body, determine if the username / key / address is present
|
||||
let body_bytes = hyper::body::to_bytes(body).await?;
|
||||
let body_matches = body_contains_user(&nip.local, pubkey, body_bytes)?;
|
||||
if body_matches {
|
||||
return Ok(UserWebVerificationStatus::Verified);
|
||||
}
|
||||
// successful response, parsed as a nip-05
|
||||
// document, but this name/pubkey was not
|
||||
// present.
|
||||
return Ok(UserWebVerificationStatus::Unverified);
|
||||
if let Ok(response_res) = tokio::time::timeout(Duration::from_secs(5), response_fut).await {
|
||||
// limit size of verification document to 1MB.
|
||||
const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024 * 1024;
|
||||
let response = response_res?;
|
||||
// determine content length from response
|
||||
let response_content_length = match response.body().size_hint().upper() {
|
||||
Some(v) => v,
|
||||
None => MAX_ALLOWED_RESPONSE_SIZE + 1, // reject missing content length
|
||||
};
|
||||
// TODO: test how hyper handles the client providing an inaccurate content-length.
|
||||
if response_content_length <= MAX_ALLOWED_RESPONSE_SIZE {
|
||||
let (parts, body) = response.into_parts();
|
||||
// TODO: consider redirects
|
||||
if parts.status == http::StatusCode::OK {
|
||||
// parse body, determine if the username / key / address is present
|
||||
let body_bytes = hyper::body::to_bytes(body).await?;
|
||||
let body_matches = body_contains_user(&nip.local, pubkey, &body_bytes)?;
|
||||
if body_matches {
|
||||
return Ok(UserWebVerificationStatus::Verified);
|
||||
}
|
||||
} else {
|
||||
info!(
|
||||
"content length missing or exceeded limits for account: {:?}",
|
||||
nip.to_string()
|
||||
);
|
||||
// successful response, parsed as a nip-05
|
||||
// document, but this name/pubkey was not
|
||||
// present.
|
||||
return Ok(UserWebVerificationStatus::Unverified);
|
||||
}
|
||||
} else {
|
||||
info!(
|
||||
"content length missing or exceeded limits for account: {:?}",
|
||||
nip.to_string()
|
||||
);
|
||||
}
|
||||
Err(_) => {
|
||||
info!("timeout verifying account {:?}", nip);
|
||||
return Ok(UserWebVerificationStatus::Unknown);
|
||||
}
|
||||
} else {
|
||||
info!("timeout verifying account {:?}", nip);
|
||||
return Ok(UserWebVerificationStatus::Unknown);
|
||||
}
|
||||
Ok(UserWebVerificationStatus::Unknown)
|
||||
}
|
||||
@@ -294,8 +257,15 @@ impl Verifier {
|
||||
// run a loop, restarting on failure
|
||||
loop {
|
||||
let res = self.run_internal().await;
|
||||
if let Err(e) = res {
|
||||
match res {
|
||||
Err(Error::ChannelClosed) => {
|
||||
// channel was closed, we are shutting down
|
||||
return;
|
||||
},
|
||||
Err(e) => {
|
||||
info!("error in verifier: {:?}", e);
|
||||
},
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -309,7 +279,7 @@ impl Verifier {
|
||||
if let Some(naddr) = e.get_nip05_addr() {
|
||||
info!("got metadata event for ({:?},{:?})", naddr.to_string() ,e.get_author_prefix());
|
||||
// Process a new author, checking if they are verified:
|
||||
let check_verified = get_latest_user_verification(self.read_pool.get().expect("could not get connection"), &e.pubkey).await;
|
||||
let check_verified = self.repo.get_latest_user_verification(&e.pubkey).await;
|
||||
// ensure the event we got is more recent than the one we have, otherwise we can ignore it.
|
||||
if let Ok(last_check) = check_verified {
|
||||
if e.created_at <= last_check.event_created {
|
||||
@@ -342,6 +312,7 @@ impl Verifier {
|
||||
}
|
||||
Err(tokio::sync::broadcast::error::RecvError::Closed) => {
|
||||
info!("metadata broadcast channel closed");
|
||||
return Err(Error::ChannelClosed);
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -370,7 +341,7 @@ impl Verifier {
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
.map(|x| x.as_secs())
|
||||
.unwrap_or(0);
|
||||
let vr = get_oldest_user_verification(self.read_pool.get()?, earliest_epoch).await;
|
||||
let vr = self.repo.get_oldest_user_verification(earliest_epoch).await;
|
||||
match vr {
|
||||
Ok(ref v) => {
|
||||
let new_status = self.get_web_verification(&v.name, &v.address).await;
|
||||
@@ -378,34 +349,37 @@ impl Verifier {
|
||||
UserWebVerificationStatus::Verified => {
|
||||
// freshly verified account, update the
|
||||
// timestamp.
|
||||
self.update_verification_record(self.write_pool.get()?, v)
|
||||
self.repo.update_verification_timestamp(v.rowid)
|
||||
.await?;
|
||||
info!("verification updated for {}", v.to_string());
|
||||
|
||||
}
|
||||
UserWebVerificationStatus::DomainNotAllowed
|
||||
| UserWebVerificationStatus::Unknown => {
|
||||
// server may be offline, or temporarily
|
||||
// blocked by the config file. Note the
|
||||
// failure so we can process something
|
||||
// else.
|
||||
| UserWebVerificationStatus::Unknown => {
|
||||
// server may be offline, or temporarily
|
||||
// blocked by the config file. Note the
|
||||
// failure so we can process something
|
||||
// else.
|
||||
|
||||
// have we had enough failures to give up?
|
||||
if v.failure_count >= max_failures as u64 {
|
||||
info!(
|
||||
"giving up on verifying {:?} after {} failures",
|
||||
v.name, v.failure_count
|
||||
);
|
||||
self.delete_verification_record(self.write_pool.get()?, v)
|
||||
.await?;
|
||||
} else {
|
||||
// record normal failure, incrementing failure count
|
||||
self.fail_verification_record(self.write_pool.get()?, v)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
// have we had enough failures to give up?
|
||||
if v.failure_count >= max_failures as u64 {
|
||||
info!(
|
||||
"giving up on verifying {:?} after {} failures",
|
||||
v.name, v.failure_count
|
||||
);
|
||||
self.repo.delete_verification(v.rowid)
|
||||
.await?;
|
||||
} else {
|
||||
// record normal failure, incrementing failure count
|
||||
info!("verification failed for {}", v.to_string());
|
||||
self.repo.fail_verification(v.rowid).await?;
|
||||
}
|
||||
}
|
||||
UserWebVerificationStatus::Unverified => {
|
||||
// domain has removed the verification, drop
|
||||
// the record on our side.
|
||||
self.delete_verification_record(self.write_pool.get()?, v)
|
||||
info!("verification rescinded for {}", v.to_string());
|
||||
self.repo.delete_verification(v.rowid)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
@@ -426,80 +400,6 @@ impl Verifier {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Reset the verification timestamp on a VerificationRecord
|
||||
pub async fn update_verification_record(
|
||||
&mut self,
|
||||
mut conn: db::PooledConnection,
|
||||
vr: &VerificationRecord,
|
||||
) -> Result<()> {
|
||||
let vr_id = vr.rowid;
|
||||
let vr_str = vr.to_string();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
// add some jitter to the verification to prevent everything from stacking up together.
|
||||
let verif_time = now_jitter(600);
|
||||
let tx = conn.transaction()?;
|
||||
{
|
||||
// update verification time and reset any failure count
|
||||
let query =
|
||||
"UPDATE user_verification SET verified_at=?, failure_count=0 WHERE id=?";
|
||||
let mut stmt = tx.prepare(query)?;
|
||||
stmt.execute(params![verif_time, vr_id])?;
|
||||
}
|
||||
tx.commit()?;
|
||||
info!("verification updated for {}", vr_str);
|
||||
let ok: Result<()> = Ok(());
|
||||
ok
|
||||
})
|
||||
.await?
|
||||
}
|
||||
/// Reset the failure timestamp on a VerificationRecord
|
||||
pub async fn fail_verification_record(
|
||||
&mut self,
|
||||
mut conn: db::PooledConnection,
|
||||
vr: &VerificationRecord,
|
||||
) -> Result<()> {
|
||||
let vr_id = vr.rowid;
|
||||
let vr_str = vr.to_string();
|
||||
let fail_count = vr.failure_count.saturating_add(1);
|
||||
tokio::task::spawn_blocking(move || {
|
||||
// add some jitter to the verification to prevent everything from stacking up together.
|
||||
let fail_time = now_jitter(600);
|
||||
let tx = conn.transaction()?;
|
||||
{
|
||||
let query = "UPDATE user_verification SET failed_at=?, failure_count=? WHERE id=?";
|
||||
let mut stmt = tx.prepare(query)?;
|
||||
stmt.execute(params![fail_time, fail_count, vr_id])?;
|
||||
}
|
||||
tx.commit()?;
|
||||
info!("verification failed for {}", vr_str);
|
||||
let ok: Result<()> = Ok(());
|
||||
ok
|
||||
})
|
||||
.await?
|
||||
}
|
||||
/// Delete a VerificationRecord that is no longer valid
|
||||
pub async fn delete_verification_record(
|
||||
&mut self,
|
||||
mut conn: db::PooledConnection,
|
||||
vr: &VerificationRecord,
|
||||
) -> Result<()> {
|
||||
let vr_id = vr.rowid;
|
||||
let vr_str = vr.to_string();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let tx = conn.transaction()?;
|
||||
{
|
||||
let query = "DELETE FROM user_verification WHERE id=?;";
|
||||
let mut stmt = tx.prepare(query)?;
|
||||
stmt.execute(params![vr_id])?;
|
||||
}
|
||||
tx.commit()?;
|
||||
info!("verification rescinded for {}", vr_str);
|
||||
let ok: Result<()> = Ok(());
|
||||
ok
|
||||
})
|
||||
.await?
|
||||
}
|
||||
|
||||
/// Persist an event, create a verification record, and broadcast.
|
||||
// TODO: have more event-writing logic handled in the db module.
|
||||
// Right now, these events avoid the rate limit. That is
|
||||
@@ -513,7 +413,7 @@ impl Verifier {
|
||||
// disabled/passive, the event has already been persisted.
|
||||
let should_write_event = self.settings.verified_users.is_enabled();
|
||||
if should_write_event {
|
||||
match db::write_event(&mut self.write_pool.get()?, event) {
|
||||
match self.repo.write_event(event).await {
|
||||
Ok(updated) => {
|
||||
if updated != 0 {
|
||||
info!(
|
||||
@@ -533,7 +433,7 @@ impl Verifier {
|
||||
}
|
||||
}
|
||||
// write the verification record
|
||||
save_verification_record(self.write_pool.get()?, event, name).await?;
|
||||
self.repo.create_verification_record(&event.id, name).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -563,7 +463,7 @@ pub struct VerificationRecord {
|
||||
|
||||
/// Check with settings to determine if a given domain is allowed to
|
||||
/// publish.
|
||||
pub fn is_domain_allowed(
|
||||
#[must_use] pub fn is_domain_allowed(
|
||||
domain: &str,
|
||||
whitelist: &Option<Vec<String>>,
|
||||
blacklist: &Option<Vec<String>>,
|
||||
@@ -583,7 +483,7 @@ pub fn is_domain_allowed(
|
||||
impl VerificationRecord {
|
||||
/// Check if the record is recent enough to be considered valid,
|
||||
/// and the domain is allowed.
|
||||
pub fn is_valid(&self, verified_users_settings: &VerifiedUsers) -> bool {
|
||||
#[must_use] pub fn is_valid(&self, verified_users_settings: &VerifiedUsers) -> bool {
|
||||
//let settings = SETTINGS.read().unwrap();
|
||||
// how long a verification record is good for
|
||||
let nip05_expiration = &verified_users_settings.verify_expiration_duration;
|
||||
@@ -630,130 +530,6 @@ impl std::fmt::Display for VerificationRecord {
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new verification record based on an event
|
||||
pub async fn save_verification_record(
|
||||
mut conn: db::PooledConnection,
|
||||
event: &Event,
|
||||
name: &str,
|
||||
) -> Result<()> {
|
||||
let e = hex::decode(&event.id).ok();
|
||||
let n = name.to_owned();
|
||||
let a_prefix = event.get_author_prefix();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let tx = conn.transaction()?;
|
||||
{
|
||||
// if we create a /new/ one, we should get rid of any old ones. or group the new ones by name and only consider the latest.
|
||||
let query = "INSERT INTO user_verification (metadata_event, name, verified_at) VALUES ((SELECT id from event WHERE event_hash=?), ?, strftime('%s','now'));";
|
||||
let mut stmt = tx.prepare(query)?;
|
||||
stmt.execute(params![e, n])?;
|
||||
// get the row ID
|
||||
let v_id = tx.last_insert_rowid();
|
||||
// delete everything else by this name
|
||||
let del_query = "DELETE FROM user_verification WHERE name = ? AND id != ?;";
|
||||
let mut del_stmt = tx.prepare(del_query)?;
|
||||
let count = del_stmt.execute(params![n,v_id])?;
|
||||
if count > 0 {
|
||||
info!("removed {} old verification records for ({:?},{:?})", count, n, a_prefix);
|
||||
}
|
||||
}
|
||||
tx.commit()?;
|
||||
info!("saved new verification record for ({:?},{:?})", n, a_prefix);
|
||||
let ok: Result<()> = Ok(());
|
||||
ok
|
||||
}).await?
|
||||
}
|
||||
|
||||
/// Retrieve the most recent verification record for a given pubkey (async).
|
||||
pub async fn get_latest_user_verification(
|
||||
conn: db::PooledConnection,
|
||||
pubkey: &str,
|
||||
) -> Result<VerificationRecord> {
|
||||
let p = pubkey.to_owned();
|
||||
tokio::task::spawn_blocking(move || query_latest_user_verification(conn, p)).await?
|
||||
}
|
||||
|
||||
/// Query database for the latest verification record for a given pubkey.
|
||||
pub fn query_latest_user_verification(
|
||||
mut conn: db::PooledConnection,
|
||||
pubkey: String,
|
||||
) -> Result<VerificationRecord> {
|
||||
let tx = conn.transaction()?;
|
||||
let query = "SELECT v.id, v.name, e.event_hash, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v LEFT JOIN event e ON e.id=v.metadata_event WHERE e.author=? ORDER BY e.created_at DESC, v.verified_at DESC, v.failed_at DESC LIMIT 1;";
|
||||
let mut stmt = tx.prepare_cached(query)?;
|
||||
let fields = stmt.query_row(params![hex::decode(&pubkey).ok()], |r| {
|
||||
let rowid: u64 = r.get(0)?;
|
||||
let rowname: String = r.get(1)?;
|
||||
let eventid: Vec<u8> = r.get(2)?;
|
||||
let created_at: u64 = r.get(3)?;
|
||||
// create a tuple since we can't throw non-rusqlite errors in this closure
|
||||
Ok((
|
||||
rowid,
|
||||
rowname,
|
||||
eventid,
|
||||
created_at,
|
||||
r.get(4).ok(),
|
||||
r.get(5).ok(),
|
||||
r.get(6)?,
|
||||
))
|
||||
})?;
|
||||
Ok(VerificationRecord {
|
||||
rowid: fields.0,
|
||||
name: Nip05Name::try_from(&fields.1[..])?,
|
||||
address: pubkey,
|
||||
event: hex::encode(fields.2),
|
||||
event_created: fields.3,
|
||||
last_success: fields.4,
|
||||
last_failure: fields.5,
|
||||
failure_count: fields.6,
|
||||
})
|
||||
}
|
||||
|
||||
/// Retrieve the oldest user verification (async)
|
||||
pub async fn get_oldest_user_verification(
|
||||
conn: db::PooledConnection,
|
||||
earliest: u64,
|
||||
) -> Result<VerificationRecord> {
|
||||
tokio::task::spawn_blocking(move || query_oldest_user_verification(conn, earliest)).await?
|
||||
}
|
||||
|
||||
pub fn query_oldest_user_verification(
|
||||
mut conn: db::PooledConnection,
|
||||
earliest: u64,
|
||||
) -> Result<VerificationRecord> {
|
||||
let tx = conn.transaction()?;
|
||||
let query = "SELECT v.id, v.name, e.event_hash, e.author, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v INNER JOIN event e ON e.id=v.metadata_event WHERE (v.verified_at < ? OR v.verified_at IS NULL) AND (v.failed_at < ? OR v.failed_at IS NULL) ORDER BY v.verified_at ASC, v.failed_at ASC LIMIT 1;";
|
||||
let mut stmt = tx.prepare_cached(query)?;
|
||||
let fields = stmt.query_row(params![earliest, earliest], |r| {
|
||||
let rowid: u64 = r.get(0)?;
|
||||
let rowname: String = r.get(1)?;
|
||||
let eventid: Vec<u8> = r.get(2)?;
|
||||
let pubkey: Vec<u8> = r.get(3)?;
|
||||
let created_at: u64 = r.get(4)?;
|
||||
// create a tuple since we can't throw non-rusqlite errors in this closure
|
||||
Ok((
|
||||
rowid,
|
||||
rowname,
|
||||
eventid,
|
||||
pubkey,
|
||||
created_at,
|
||||
r.get(5).ok(),
|
||||
r.get(6).ok(),
|
||||
r.get(7)?,
|
||||
))
|
||||
})?;
|
||||
let vr = VerificationRecord {
|
||||
rowid: fields.0,
|
||||
name: Nip05Name::try_from(&fields.1[..])?,
|
||||
address: hex::encode(fields.3),
|
||||
event: hex::encode(fields.2),
|
||||
event_created: fields.4,
|
||||
last_success: fields.5,
|
||||
last_failure: fields.6,
|
||||
failure_count: fields.7,
|
||||
};
|
||||
Ok(vr)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -762,7 +538,7 @@ mod tests {
|
||||
fn local_from_inet() {
|
||||
let addr = "bob@example.com";
|
||||
let parsed = Nip05Name::try_from(addr);
|
||||
assert!(!parsed.is_err());
|
||||
assert!(parsed.is_ok());
|
||||
let v = parsed.unwrap();
|
||||
assert_eq!(v.local, "bob");
|
||||
assert_eq!(v.domain, "example.com");
|
||||
|
@@ -19,18 +19,14 @@ pub enum Notice {
|
||||
}
|
||||
|
||||
impl EventResultStatus {
|
||||
pub fn to_bool(&self) -> bool {
|
||||
#[must_use] pub fn to_bool(&self) -> bool {
|
||||
match self {
|
||||
Self::Saved => true,
|
||||
Self::Duplicate => true,
|
||||
Self::Invalid => false,
|
||||
Self::Blocked => false,
|
||||
Self::RateLimited => false,
|
||||
Self::Error => false,
|
||||
Self::Duplicate | Self::Saved => true,
|
||||
Self::Invalid | Self::Blocked | Self::RateLimited | Self::Error => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn prefix(&self) -> &'static str {
|
||||
#[must_use] pub fn prefix(&self) -> &'static str {
|
||||
match self {
|
||||
Self::Saved => "saved",
|
||||
Self::Duplicate => "duplicate",
|
||||
@@ -47,7 +43,7 @@ impl Notice {
|
||||
// Notice::err_msg(format!("{}", err), id)
|
||||
//}
|
||||
|
||||
pub fn message(msg: String) -> Notice {
|
||||
#[must_use] pub fn message(msg: String) -> Notice {
|
||||
Notice::Message(msg)
|
||||
}
|
||||
|
||||
@@ -56,27 +52,27 @@ impl Notice {
|
||||
Notice::EventResult(EventResult { id, msg, status })
|
||||
}
|
||||
|
||||
pub fn invalid(id: String, msg: &str) -> Notice {
|
||||
#[must_use] pub fn invalid(id: String, msg: &str) -> Notice {
|
||||
Notice::prefixed(id, msg, EventResultStatus::Invalid)
|
||||
}
|
||||
|
||||
pub fn blocked(id: String, msg: &str) -> Notice {
|
||||
#[must_use] pub fn blocked(id: String, msg: &str) -> Notice {
|
||||
Notice::prefixed(id, msg, EventResultStatus::Blocked)
|
||||
}
|
||||
|
||||
pub fn rate_limited(id: String, msg: &str) -> Notice {
|
||||
#[must_use] pub fn rate_limited(id: String, msg: &str) -> Notice {
|
||||
Notice::prefixed(id, msg, EventResultStatus::RateLimited)
|
||||
}
|
||||
|
||||
pub fn duplicate(id: String) -> Notice {
|
||||
#[must_use] pub fn duplicate(id: String) -> Notice {
|
||||
Notice::prefixed(id, "", EventResultStatus::Duplicate)
|
||||
}
|
||||
|
||||
pub fn error(id: String, msg: &str) -> Notice {
|
||||
#[must_use] pub fn error(id: String, msg: &str) -> Notice {
|
||||
Notice::prefixed(id, msg, EventResultStatus::Error)
|
||||
}
|
||||
|
||||
pub fn saved(id: String) -> Notice {
|
||||
#[must_use] pub fn saved(id: String) -> Notice {
|
||||
Notice::EventResult(EventResult {
|
||||
id,
|
||||
msg: "".into(),
|
||||
69 src/repo/mod.rs Normal file
@@ -0,0 +1,69 @@
use crate::db::QueryResult;
use crate::error::Result;
use crate::event::Event;
use crate::nip05::VerificationRecord;
use crate::subscription::Subscription;
use crate::utils::unix_time;
use async_trait::async_trait;
use rand::Rng;

pub mod sqlite;
pub mod sqlite_migration;
pub mod postgres;
pub mod postgres_migration;

#[async_trait]
pub trait NostrRepo: Send + Sync {
    /// Start the repository (any initialization or maintenance tasks can be kicked off here)
    async fn start(&self) -> Result<()>;

    /// Run migrations and return current version
    async fn migrate_up(&self) -> Result<usize>;

    /// Persist event to database
    async fn write_event(&self, e: &Event) -> Result<u64>;

    /// Perform a database query using a subscription.
    ///
    /// The [`Subscription`] is converted into a SQL query. Each result
    /// is published on the `query_tx` channel as it is returned. If a
    /// message becomes available on the `abandon_query_rx` channel, the
    /// query is immediately aborted.
    async fn query_subscription(
        &self,
        sub: Subscription,
        client_id: String,
        query_tx: tokio::sync::mpsc::Sender<QueryResult>,
        mut abandon_query_rx: tokio::sync::oneshot::Receiver<()>,
    ) -> Result<()>;

    /// Perform normal maintenance
    async fn optimize_db(&self) -> Result<()>;

    /// Create a new verification record connected to a specific event
    async fn create_verification_record(&self, event_id: &str, name: &str) -> Result<()>;

    /// Update verification timestamp
    async fn update_verification_timestamp(&self, id: u64) -> Result<()>;

    /// Update verification record as failed
    async fn fail_verification(&self, id: u64) -> Result<()>;

    /// Delete verification record
    async fn delete_verification(&self, id: u64) -> Result<()>;

    /// Get the latest verification record for a given pubkey.
    async fn get_latest_user_verification(&self, pub_key: &str) -> Result<VerificationRecord>;

    /// Get oldest verification before timestamp
    async fn get_oldest_user_verification(&self, before: u64) -> Result<VerificationRecord>;
}

// Current time, with a slight forward jitter in seconds
pub(crate) fn now_jitter(sec: u64) -> u64 {
    // random time between now, and 10min in future.
    let mut rng = rand::thread_rng();
    let jitter_amount = rng.gen_range(0..sec);
    let now = unix_time();
    now.saturating_add(jitter_amount)
}
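To show how the new trait is meant to be consumed (a sketch under assumptions, not code from the diff; the function name and the numeric threshold are invented), a caller such as the NIP-05 verifier can hold an Arc<dyn NostrRepo> and drive verification records through it:

// Illustrative sketch only: `repo` can be any NostrRepo implementation (SQLite or Postgres).
async fn refresh_or_drop(repo: std::sync::Arc<dyn NostrRepo>, pubkey: &str) -> Result<()> {
    let latest = repo.get_latest_user_verification(pubkey).await?;
    if latest.failure_count >= 20 {
        // hypothetical give-up threshold; the real limit comes from configuration
        repo.delete_verification(latest.rowid).await?;
    } else {
        repo.update_verification_timestamp(latest.rowid).await?;
    }
    Ok(())
}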
741 src/repo/postgres.rs Normal file
@@ -0,0 +1,741 @@
|
||||
use crate::db::QueryResult;
|
||||
use crate::error::Result;
|
||||
use crate::event::{single_char_tagname, Event};
|
||||
use crate::nip05::{Nip05Name, VerificationRecord};
|
||||
use crate::repo::{now_jitter, NostrRepo};
|
||||
use crate::subscription::{ReqFilter, Subscription};
|
||||
use async_std::stream::StreamExt;
|
||||
use async_trait::async_trait;
|
||||
use chrono::{DateTime, TimeZone, Utc};
|
||||
use sqlx::postgres::PgRow;
|
||||
use sqlx::{Error, Execute, FromRow, Postgres, QueryBuilder, Row};
|
||||
use std::time::{Duration, Instant};
|
||||
use sqlx::Error::RowNotFound;
|
||||
|
||||
use crate::hexrange::{hex_range, HexSearch};
|
||||
use crate::repo::postgres_migration::run_migrations;
|
||||
use crate::server::NostrMetrics;
|
||||
use crate::utils::{is_hex, is_lower_hex};
|
||||
use tokio::sync::mpsc::Sender;
|
||||
use tokio::sync::oneshot::Receiver;
|
||||
use tracing::log::trace;
|
||||
use tracing::{debug, error, info};
|
||||
use crate::error;
|
||||
|
||||
pub type PostgresPool = sqlx::pool::Pool<Postgres>;
|
||||
|
||||
pub struct PostgresRepo {
|
||||
conn: PostgresPool,
|
||||
metrics: NostrMetrics,
|
||||
}
|
||||
|
||||
impl PostgresRepo {
|
||||
pub fn new(c: PostgresPool, m: NostrMetrics) -> PostgresRepo {
|
||||
PostgresRepo {
|
||||
conn: c,
|
||||
metrics: m,
|
||||
}
|
||||
}
|
||||
}
|
||||
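A construction sketch for orientation (assumptions: the connection URL, pool size, and that a NostrMetrics value is available from the server; none of this appears in the diff):

// Illustrative sketch only: wiring a PostgresRepo to a connection pool and running migrations.
async fn build_repo(metrics: NostrMetrics) -> crate::error::Result<PostgresRepo> {
    let pool: PostgresPool = sqlx::postgres::PgPoolOptions::new()
        .max_connections(8)
        .connect("postgres://user:pass@localhost/nostr") // placeholder URL
        .await?;
    let repo = PostgresRepo::new(pool, metrics);
    repo.migrate_up().await?;
    Ok(repo)
}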
|
||||
#[async_trait]
|
||||
impl NostrRepo for PostgresRepo {
|
||||
|
||||
async fn start(&self) -> Result<()> {
|
||||
info!("not implemented");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn migrate_up(&self) -> Result<usize> {
|
||||
Ok(run_migrations(&self.conn).await?)
|
||||
}
|
||||
|
||||
async fn write_event(&self, e: &Event) -> Result<u64> {
|
||||
// start transaction
|
||||
let mut tx = self.conn.begin().await?;
|
||||
let start = Instant::now();
|
||||
|
||||
// get relevant fields from event and convert to blobs.
|
||||
let id_blob = hex::decode(&e.id).ok();
|
||||
let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok();
|
||||
let delegator_blob: Option<Vec<u8>> =
|
||||
e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
|
||||
let event_str = serde_json::to_string(&e).unwrap();
|
||||
|
||||
// determine if this event would be shadowed by an existing
|
||||
// replaceable event or parameterized replaceable event.
|
||||
if e.is_replaceable() {
|
||||
let repl_count = sqlx::query(
|
||||
"SELECT e.id FROM event e WHERE e.pub_key=? AND e.kind=? AND e.created_at >= ? LIMIT 1;")
|
||||
.bind(&pubkey_blob)
|
||||
.bind(e.kind as i64)
|
||||
.bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
|
||||
.fetch_optional(&mut tx)
|
||||
.await?;
|
||||
if repl_count.is_some() {
|
||||
return Ok(0);
|
||||
}
|
||||
}
|
||||
if let Some(d_tag) = e.distinct_param() {
|
||||
let repl_count:i64 = if is_lower_hex(&d_tag) && (d_tag.len() % 2 == 0) {
|
||||
sqlx::query_scalar(
|
||||
"SELECT count(*) AS count FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.pub_key=$1 AND e.kind=$2 AND t.name='d' AND t.value_hex=$3 AND e.created_at >= $4 LIMIT 1;")
|
||||
.bind(hex::decode(&e.pubkey).ok())
|
||||
.bind(e.kind as i64)
|
||||
.bind(hex::decode(d_tag).ok())
|
||||
.bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
|
||||
.fetch_one(&mut tx)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_scalar(
|
||||
"SELECT count(*) AS count FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.pub_key=$1 AND e.kind=$2 AND t.name='d' AND t.value=$3 AND e.created_at >= $4 LIMIT 1;")
|
||||
.bind(hex::decode(&e.pubkey).ok())
|
||||
.bind(e.kind as i64)
|
||||
.bind(d_tag.as_bytes())
|
||||
.bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
|
||||
.fetch_one(&mut tx)
|
||||
.await?
|
||||
};
|
||||
// if any rows were returned, then some newer event with
|
||||
// the same author/kind/tag value exists, and we can ignore
|
||||
// this event.
|
||||
if repl_count > 0 {
|
||||
return Ok(0)
|
||||
}
|
||||
}
|
||||
// ignore if the event hash is a duplicate.
|
||||
let mut ins_count = sqlx::query(
|
||||
r#"INSERT INTO "event"
|
||||
(id, pub_key, created_at, kind, "content", delegated_by)
|
||||
VALUES($1, $2, $3, $4, $5, $6)
|
||||
ON CONFLICT (id) DO NOTHING"#,
|
||||
)
|
||||
.bind(&id_blob)
|
||||
.bind(&pubkey_blob)
|
||||
.bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
|
||||
.bind(e.kind as i64)
|
||||
.bind(event_str.into_bytes())
|
||||
.bind(delegator_blob)
|
||||
.execute(&mut tx)
|
||||
.await?
|
||||
.rows_affected();
|
||||
|
||||
if ins_count == 0 {
|
||||
// if the event was a duplicate, no need to insert event or
|
||||
// pubkey references. This will abort the txn.
|
||||
return Ok(0);
|
||||
}
|
||||
|
||||
// add all tags to the tag table
|
||||
for tag in e.tags.iter() {
|
||||
// ensure we have 2 values.
|
||||
if tag.len() >= 2 {
|
||||
let tag_name = &tag[0];
|
||||
let tag_val = &tag[1];
|
||||
// only single-char tags are searchable
|
||||
let tag_char_opt = single_char_tagname(tag_name);
|
||||
let query = "INSERT INTO tag (event_id, \"name\", value) VALUES($1, $2, $3) \
|
||||
ON CONFLICT (event_id, \"name\", value) DO NOTHING";
|
||||
match &tag_char_opt {
|
||||
Some(_) => {
|
||||
// if tag value is lowercase hex;
|
||||
if is_lower_hex(tag_val) && (tag_val.len() % 2 == 0) {
|
||||
sqlx::query(query)
|
||||
.bind(&id_blob)
|
||||
.bind(tag_name)
|
||||
.bind(hex::decode(tag_val).ok())
|
||||
.execute(&mut tx)
|
||||
.await?;
|
||||
} else {
|
||||
sqlx::query(query)
|
||||
.bind(&id_blob)
|
||||
.bind(tag_name)
|
||||
.bind(tag_val.as_bytes())
|
||||
.execute(&mut tx)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
None => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
if e.is_replaceable() {
|
||||
let update_count = sqlx::query("DELETE FROM \"event\" WHERE kind=$1 and pub_key = $2 and id not in (select id from \"event\" where kind=$1 and pub_key=$2 order by created_at desc limit 1);")
|
||||
.bind(e.kind as i64)
|
||||
.bind(hex::decode(&e.pubkey).ok())
|
||||
.execute(&mut tx)
|
||||
.await?.rows_affected();
|
||||
if update_count > 0 {
|
||||
info!(
|
||||
"hid {} older replaceable kind {} events for author: {:?}",
|
||||
update_count,
|
||||
e.kind,
|
||||
e.get_author_prefix()
|
||||
);
|
||||
}
|
||||
}
|
||||
// parameterized replaceable events
|
||||
// check for parameterized replaceable events that would be hidden; don't insert these either.
|
||||
if let Some(d_tag) = e.distinct_param() {
|
||||
let update_count = if is_lower_hex(&d_tag) && (d_tag.len() % 2 == 0) {
|
||||
sqlx::query("DELETE FROM event WHERE kind=$1 AND pub_key=$2 AND id IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=$1 AND e.pub_key=$2 AND t.name='d' AND t.value_hex=$3 ORDER BY created_at DESC OFFSET 1);")
|
||||
.bind(e.kind as i64)
|
||||
.bind(hex::decode(&e.pubkey).ok())
|
||||
.bind(hex::decode(d_tag).ok())
|
||||
.execute(&mut tx)
|
||||
.await?.rows_affected()
|
||||
} else {
|
||||
sqlx::query("DELETE FROM event WHERE kind=$1 AND pub_key=$2 AND id IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=$1 AND e.pub_key=$2 AND t.name='d' AND t.value=$3 ORDER BY created_at DESC OFFSET 1);")
|
||||
.bind(e.kind as i64)
|
||||
.bind(hex::decode(&e.pubkey).ok())
|
||||
.bind(d_tag.as_bytes())
|
||||
.execute(&mut tx)
|
||||
.await?.rows_affected()
|
||||
};
|
||||
if update_count > 0 {
|
||||
info!(
|
||||
"removed {} older parameterized replaceable kind {} events for author: {:?}",
|
||||
update_count,
|
||||
e.kind,
|
||||
e.get_author_prefix()
|
||||
);
|
||||
}
|
||||
}
|
||||
// if this event is a deletion, hide the referenced events from the same author.
|
||||
if e.kind == 5 {
|
||||
let event_candidates = e.tag_values_by_name("e");
|
||||
let pub_keys: Vec<Vec<u8>> = event_candidates
|
||||
.iter()
|
||||
.filter(|x| is_hex(x) && x.len() == 64)
|
||||
.filter_map(|x| hex::decode(x).ok())
|
||||
.collect();
|
||||
|
||||
let mut builder = QueryBuilder::new(
|
||||
"UPDATE \"event\" SET hidden = 1::bit(1) WHERE kind != 5 AND pub_key = ",
|
||||
);
|
||||
builder.push_bind(hex::decode(&e.pubkey).ok());
|
||||
builder.push(" AND id IN (");
|
||||
|
||||
let mut sep = builder.separated(", ");
|
||||
for pk in pub_keys {
|
||||
sep.push_bind(pk);
|
||||
}
|
||||
sep.push_unseparated(")");
|
||||
|
||||
let update_count = builder.build().execute(&mut tx).await?.rows_affected();
|
||||
info!(
|
||||
"hid {} deleted events for author {:?}",
|
||||
update_count,
|
||||
e.get_author_prefix()
|
||||
);
|
||||
} else {
|
||||
// check if a deletion has already been recorded for this event.
|
||||
// Only relevant for non-deletion events
|
||||
let del_count = sqlx::query(
|
||||
"SELECT e.id FROM \"event\" e \
|
||||
LEFT JOIN tag t ON e.id = t.event_id \
|
||||
WHERE e.pub_key = $1 AND t.\"name\" = 'e' AND e.kind = 5 AND t.value = $2 LIMIT 1",
|
||||
)
|
||||
.bind(&pubkey_blob)
|
||||
.bind(&id_blob)
|
||||
.fetch_optional(&mut tx)
|
||||
.await?;
|
||||
|
||||
// check if the query returned a result, meaning we should
// hide the current event
|
||||
if del_count.is_some() {
|
||||
// a deletion already existed, mark original event as hidden.
|
||||
info!(
|
||||
"hid event: {:?} due to existing deletion by author: {:?}",
|
||||
e.get_event_id_prefix(),
|
||||
e.get_author_prefix()
|
||||
);
|
||||
sqlx::query("UPDATE \"event\" SET hidden = 1::bit(1) WHERE id = $1")
|
||||
.bind(&id_blob)
|
||||
.execute(&mut tx)
|
||||
.await?;
|
||||
// event was deleted, so let caller know nothing new
|
||||
// arrived, preventing this from being sent to active
|
||||
// subscriptions
|
||||
ins_count = 0;
|
||||
}
|
||||
}
|
||||
tx.commit().await?;
|
||||
self.metrics
|
||||
.write_events
|
||||
.observe(start.elapsed().as_secs_f64());
|
||||
Ok(ins_count)
|
||||
}
|
||||
|
||||
async fn query_subscription(
|
||||
&self,
|
||||
sub: Subscription,
|
||||
client_id: String,
|
||||
query_tx: Sender<QueryResult>,
|
||||
mut abandon_query_rx: Receiver<()>,
|
||||
) -> Result<()> {
|
||||
let start = Instant::now();
|
||||
let mut row_count: usize = 0;
|
||||
let metrics = &self.metrics;
|
||||
|
||||
for filter in sub.filters.iter() {
|
||||
let start = Instant::now();
|
||||
// generate SQL query
|
||||
let q_filter = query_from_filter(filter);
|
||||
if q_filter.is_none() {
|
||||
debug!("Failed to generate query!");
|
||||
continue;
|
||||
}
|
||||
|
||||
debug!("SQL generated in {:?}", start.elapsed());
|
||||
|
||||
// cutoff for displaying slow queries
|
||||
let slow_cutoff = Duration::from_millis(2000);
|
||||
|
||||
// any client that doesn't cause us to generate new rows in 5
|
||||
// seconds gets dropped.
|
||||
let abort_cutoff = Duration::from_secs(5);
|
||||
|
||||
let start = Instant::now();
|
||||
let mut slow_first_event;
|
||||
let mut last_successful_send = Instant::now();
|
||||
|
||||
// execute the query. Don't cache, since queries vary so much.
|
||||
let mut q_filter = q_filter.unwrap();
|
||||
let q_build = q_filter.build();
|
||||
let sql = q_build.sql();
|
||||
let mut results = q_build.fetch(&self.conn);
|
||||
|
||||
let mut first_result = true;
|
||||
while let Some(row) = results.next().await {
|
||||
if let Err(e) = row {
|
||||
error!("Query failed: {} {} {:?}", e, sql, filter);
|
||||
break;
|
||||
}
|
||||
let first_event_elapsed = start.elapsed();
|
||||
slow_first_event = first_event_elapsed >= slow_cutoff;
|
||||
if first_result {
|
||||
debug!(
|
||||
"first result in {:?} (cid: {}, sub: {:?})",
|
||||
first_event_elapsed, client_id, sub.id
|
||||
);
|
||||
first_result = false;
|
||||
}
|
||||
|
||||
// logging for slow queries; show sub and SQL.
|
||||
// to reduce logging; only show 1/16th of clients (leading 0)
|
||||
if slow_first_event && client_id.starts_with("00") {
|
||||
debug!(
|
||||
"query req (slow): {:?} (cid: {}, sub: {:?})",
|
||||
&sub, client_id, sub.id
|
||||
);
|
||||
} else {
|
||||
trace!(
|
||||
"query req: {:?} (cid: {}, sub: {:?})",
|
||||
&sub,
|
||||
client_id,
|
||||
sub.id
|
||||
);
|
||||
}
|
||||
|
||||
// check if this is still active; every 100 rows
|
||||
if row_count % 100 == 0 && abandon_query_rx.try_recv().is_ok() {
|
||||
debug!("query cancelled by client (cid: {}, sub: {:?})", client_id, sub.id);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
row_count += 1;
|
||||
let event_json: Vec<u8> = row.unwrap().get(0);
|
||||
loop {
|
||||
if query_tx.capacity() != 0 {
|
||||
// we have capacity to add another item
|
||||
break;
|
||||
} else {
|
||||
// the queue is full
|
||||
trace!("db reader thread is stalled");
|
||||
if last_successful_send + abort_cutoff < Instant::now() {
|
||||
// the queue has been full for too long, abort
|
||||
info!("aborting database query due to slow client");
|
||||
metrics.query_aborts.with_label_values(&["slowclient"]).inc();
|
||||
return Ok(());
|
||||
}
|
||||
// give the queue a chance to clear before trying again
|
||||
async_std::task::sleep(Duration::from_millis(100)).await;
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: we could use try_send, but we'd have to juggle
|
||||
// getting the query result back as part of the error
|
||||
// result.
|
||||
query_tx
|
||||
.send(QueryResult {
|
||||
sub_id: sub.get_id(),
|
||||
event: String::from_utf8(event_json).unwrap(),
|
||||
})
|
||||
.await
|
||||
.ok();
|
||||
last_successful_send = Instant::now();
|
||||
}
|
||||
}
|
||||
query_tx
|
||||
.send(QueryResult {
|
||||
sub_id: sub.get_id(),
|
||||
event: "EOSE".to_string(),
|
||||
})
|
||||
.await
|
||||
.ok();
|
||||
self.metrics
|
||||
.query_sub
|
||||
.observe(start.elapsed().as_secs_f64());
|
||||
debug!(
|
||||
"query completed in {:?} (cid: {}, sub: {:?}, db_time: {:?}, rows: {})",
|
||||
start.elapsed(),
|
||||
client_id,
|
||||
sub.id,
|
||||
start.elapsed(),
|
||||
row_count
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn optimize_db(&self) -> Result<()> {
|
||||
// Not implemented
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn create_verification_record(&self, event_id: &str, name: &str) -> Result<()> {
|
||||
let mut tx = self.conn.begin().await?;
|
||||
|
||||
sqlx::query("DELETE FROM user_verification WHERE \"name\" = $1")
|
||||
.bind(name)
|
||||
.execute(&mut tx)
|
||||
.await?;
|
||||
|
||||
sqlx::query("INSERT INTO user_verification (event_id, \"name\", verified_at) VALUES ($1, $2, now())")
|
||||
.bind(hex::decode(event_id).ok())
|
||||
.bind(name)
|
||||
.execute(&mut tx)
|
||||
.await?;
|
||||
|
||||
tx.commit().await?;
|
||||
info!("saved new verification record for ({:?})", name);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn update_verification_timestamp(&self, id: u64) -> Result<()> {
|
||||
// add some jitter to the verification to prevent everything from stacking up together.
|
||||
let verify_time = now_jitter(600);
|
||||
|
||||
// update verification time and reset any failure count
|
||||
sqlx::query(
|
||||
"UPDATE user_verification SET verified_at = $1, fail_count = 0 WHERE id = $2",
|
||||
)
|
||||
.bind(Utc.timestamp_opt(verify_time as i64, 0).unwrap())
|
||||
.bind(id as i64)
|
||||
.execute(&self.conn)
|
||||
.await?;
|
||||
|
||||
info!("verification updated for {}", id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn fail_verification(&self, id: u64) -> Result<()> {
|
||||
sqlx::query("UPDATE user_verification SET failed_at = now(), fail_count = fail_count + 1 WHERE id = $1")
|
||||
.bind(id as i64)
|
||||
.execute(&self.conn)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete_verification(&self, id: u64) -> Result<()> {
|
||||
sqlx::query("DELETE FROM user_verification WHERE id = $1")
|
||||
.bind(id as i64)
|
||||
.execute(&self.conn)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_latest_user_verification(&self, pub_key: &str) -> Result<VerificationRecord> {
|
||||
let query = r#"SELECT
|
||||
v.id,
|
||||
v."name",
|
||||
e.id as event_id,
|
||||
e.pub_key,
|
||||
e.created_at,
|
||||
v.verified_at,
|
||||
v.failed_at,
|
||||
v.fail_count
|
||||
FROM user_verification v
|
||||
LEFT JOIN "event" e ON e.id = v.event_id
|
||||
WHERE e.pub_key = $1
|
||||
ORDER BY e.created_at DESC, v.verified_at DESC, v.failed_at DESC
|
||||
LIMIT 1"#;
|
||||
sqlx::query_as::<_, VerificationRecord>(query)
|
||||
.bind(hex::decode(pub_key).ok())
|
||||
.fetch_optional(&self.conn)
|
||||
.await?
|
||||
.ok_or(error::Error::SqlxError(RowNotFound))
|
||||
}
|
||||
|
||||
async fn get_oldest_user_verification(&self, before: u64) -> Result<VerificationRecord> {
|
||||
let query = r#"SELECT
|
||||
v.id,
|
||||
v."name",
|
||||
e.id as event_id,
|
||||
e.pub_key,
|
||||
e.created_at,
|
||||
v.verified_at,
|
||||
v.failed_at,
|
||||
v.fail_count
|
||||
FROM user_verification v
|
||||
LEFT JOIN "event" e ON e.id = v.event_id
|
||||
WHERE (v.verified_at < $1 OR v.verified_at IS NULL)
|
||||
AND (v.failed_at < $1 OR v.failed_at IS NULL)
|
||||
ORDER BY v.verified_at ASC, v.failed_at ASC
|
||||
LIMIT 1"#;
|
||||
sqlx::query_as::<_, VerificationRecord>(query)
|
||||
.bind(Utc.timestamp_opt(before as i64, 0).unwrap())
|
||||
.fetch_optional(&self.conn)
|
||||
.await?
|
||||
.ok_or(error::Error::SqlxError(RowNotFound))
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a dynamic SQL query and params from a subscription filter.
|
||||
fn query_from_filter(f: &ReqFilter) -> Option<QueryBuilder<Postgres>> {
|
||||
// if the filter is malformed, don't return anything.
|
||||
if f.force_no_match {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut query = QueryBuilder::new("SELECT e.\"content\", e.created_at FROM \"event\" e WHERE ");
|
||||
|
||||
let mut push_and = false;
|
||||
// Query for "authors", allowing prefix matches
|
||||
if let Some(auth_vec) = &f.authors {
|
||||
// filter out non-hex values
|
||||
let auth_vec: Vec<&String> = auth_vec.iter().filter(|a| is_hex(a)).collect();
|
||||
|
||||
if !auth_vec.is_empty() {
|
||||
query.push("(");
|
||||
|
||||
// shortcut authors into "IN" query
|
||||
let any_is_range = auth_vec.iter().any(|pk| pk.len() != 64);
|
||||
if !any_is_range {
|
||||
query.push("e.pub_key in (");
|
||||
let mut pk_sep = query.separated(", ");
|
||||
for pk in auth_vec.iter() {
|
||||
pk_sep.push_bind(hex::decode(pk).ok());
|
||||
}
|
||||
query.push(") OR e.delegated_by in (");
|
||||
let mut pk_delegated_sep = query.separated(", ");
|
||||
for pk in auth_vec.iter() {
|
||||
pk_delegated_sep.push_bind(hex::decode(pk).ok());
|
||||
}
|
||||
query.push(")");
|
||||
push_and = true;
|
||||
} else {
|
||||
let mut range_authors = query.separated(" OR ");
|
||||
for auth in auth_vec {
|
||||
match hex_range(auth) {
|
||||
Some(HexSearch::Exact(ex)) => {
|
||||
range_authors
|
||||
.push("(e.pub_key = ")
|
||||
.push_bind_unseparated(ex.clone())
|
||||
.push_unseparated(" OR e.delegated_by = ")
|
||||
.push_bind_unseparated(ex)
|
||||
.push_unseparated(")");
|
||||
}
|
||||
Some(HexSearch::Range(lower, upper)) => {
|
||||
range_authors
|
||||
.push("((e.pub_key > ")
|
||||
.push_bind_unseparated(lower.clone())
|
||||
.push_unseparated(" AND e.pub_key < ")
|
||||
.push_bind_unseparated(upper.clone())
|
||||
.push_unseparated(") OR (e.delegated_by > ")
|
||||
.push_bind_unseparated(lower)
|
||||
.push_unseparated(" AND e.delegated_by < ")
|
||||
.push_bind_unseparated(upper)
|
||||
.push_unseparated("))");
|
||||
}
|
||||
Some(HexSearch::LowerOnly(lower)) => {
|
||||
range_authors
|
||||
.push("(e.pub_key > ")
|
||||
.push_bind_unseparated(lower.clone())
|
||||
.push_unseparated(" OR e.delegated_by > ")
|
||||
.push_bind_unseparated(lower)
|
||||
.push_unseparated(")");
|
||||
}
|
||||
None => {
|
||||
info!("Could not parse hex range from author {:?}", auth);
|
||||
}
|
||||
}
|
||||
push_and = true;
|
||||
}
|
||||
}
|
||||
query.push(")");
|
||||
}
|
||||
}
|
||||
|
||||
// Query for Kind
|
||||
if let Some(ks) = &f.kinds {
|
||||
if !ks.is_empty() {
|
||||
if push_and {
|
||||
query.push(" AND ");
|
||||
}
|
||||
push_and = true;
|
||||
|
||||
query.push("e.kind in (");
|
||||
let mut list_query = query.separated(", ");
|
||||
for k in ks.iter() {
|
||||
list_query.push_bind(*k as i64);
|
||||
}
|
||||
query.push(")");
|
||||
}
|
||||
}
|
||||
|
||||
// Query for event, allowing prefix matches
|
||||
if let Some(id_vec) = &f.ids {
|
||||
// filter out non-hex values
|
||||
let id_vec: Vec<&String> = id_vec.iter().filter(|a| is_hex(a)).collect();
|
||||
|
||||
if !id_vec.is_empty() {
|
||||
if push_and {
|
||||
query.push(" AND (");
|
||||
} else {
|
||||
query.push("(");
|
||||
}
|
||||
push_and = true;
|
||||
|
||||
// shortcut ids into "IN" query
|
||||
let any_is_range = id_vec.iter().any(|pk| pk.len() != 64);
|
||||
if !any_is_range {
|
||||
query.push("id in (");
|
||||
let mut sep = query.separated(", ");
|
||||
for id in id_vec.iter() {
|
||||
sep.push_bind(hex::decode(id).ok());
|
||||
}
|
||||
query.push(")");
|
||||
} else {
|
||||
// take each author and convert to a hex search
|
||||
let mut id_query = query.separated(" OR ");
|
||||
for id in id_vec {
|
||||
match hex_range(id) {
|
||||
Some(HexSearch::Exact(ex)) => {
|
||||
id_query
|
||||
.push("(id = ")
|
||||
.push_bind_unseparated(ex)
|
||||
.push_unseparated(")");
|
||||
}
|
||||
Some(HexSearch::Range(lower, upper)) => {
|
||||
id_query
|
||||
.push("(id > ")
|
||||
.push_bind_unseparated(lower)
|
||||
.push_unseparated(" AND id < ")
|
||||
.push_bind_unseparated(upper)
|
||||
.push_unseparated(")");
|
||||
}
|
||||
Some(HexSearch::LowerOnly(lower)) => {
|
||||
id_query
|
||||
.push("(id > ")
|
||||
.push_bind_unseparated(lower)
|
||||
.push_unseparated(")");
|
||||
}
|
||||
None => {
|
||||
info!("Could not parse hex range from id {:?}", id);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
query.push(")");
|
||||
}
|
||||
}
|
||||
|
||||
// Query for tags
|
||||
if let Some(map) = &f.tags {
|
||||
if !map.is_empty() {
|
||||
if push_and {
|
||||
query.push(" AND ");
|
||||
}
|
||||
push_and = true;
|
||||
|
||||
for (key, val) in map.iter() {
|
||||
query.push("e.id IN (SELECT ee.id FROM \"event\" ee LEFT JOIN tag t on ee.id = t.event_id WHERE ee.hidden != 1::bit(1) and (t.\"name\" = ")
|
||||
.push_bind(key.to_string())
|
||||
.push(" AND (value in (");
|
||||
|
||||
// plain value match first
|
||||
let mut tag_query = query.separated(", ");
|
||||
for v in val.iter() {
|
||||
if (v.len() % 2 != 0) && !is_lower_hex(v) {
|
||||
tag_query.push_bind(v.as_bytes());
|
||||
} else {
|
||||
tag_query.push_bind(hex::decode(v).ok());
|
||||
}
|
||||
}
|
||||
query.push("))))");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Query for timestamp
|
||||
if f.since.is_some() {
|
||||
if push_and {
|
||||
query.push(" AND ");
|
||||
}
|
||||
push_and = true;
|
||||
query
|
||||
.push("e.created_at > ")
|
||||
.push_bind(Utc.timestamp_opt(f.since.unwrap() as i64, 0).unwrap());
|
||||
}
|
||||
|
||||
// Query for timestamp
|
||||
if f.until.is_some() {
|
||||
if push_and {
|
||||
query.push(" AND ");
|
||||
}
|
||||
push_and = true;
|
||||
query
|
||||
.push("e.created_at < ")
|
||||
.push_bind(Utc.timestamp_opt(f.until.unwrap() as i64, 0).unwrap());
|
||||
}
|
||||
|
||||
// never display hidden events
|
||||
if push_and {
|
||||
query.push(" AND e.hidden != 1::bit(1)");
|
||||
} else {
|
||||
query.push("e.hidden != 1::bit(1)");
|
||||
}
|
||||
|
||||
// Apply per-filter limit to this query.
|
||||
// The use of a LIMIT implies a DESC order, to capture only the most recent events.
|
||||
if let Some(lim) = f.limit {
|
||||
query.push(" ORDER BY e.created_at DESC LIMIT ");
|
||||
query.push(lim.min(1000));
|
||||
} else {
|
||||
query.push(" ORDER BY e.created_at ASC LIMIT ");
|
||||
query.push(1000);
|
||||
}
|
||||
Some(query)
|
||||
}
|
||||
|
||||
impl FromRow<'_, PgRow> for VerificationRecord {
|
||||
fn from_row(row: &'_ PgRow) -> std::result::Result<Self, Error> {
|
||||
let name =
|
||||
Nip05Name::try_from(row.get::<'_, &str, &str>("name")).or(Err(RowNotFound))?;
|
||||
Ok(VerificationRecord {
|
||||
rowid: row.get::<'_, i64, &str>("id") as u64,
|
||||
name,
|
||||
address: hex::encode(row.get::<'_, Vec<u8>, &str>("pub_key")),
|
||||
event: hex::encode(row.get::<'_, Vec<u8>, &str>("event_id")),
|
||||
event_created: row.get::<'_, DateTime<Utc>, &str>("created_at").timestamp() as u64,
|
||||
last_success: None,
|
||||
last_failure: match row.try_get::<'_, DateTime<Utc>, &str>("failed_at") {
|
||||
Ok(x) => Some(x.timestamp() as u64),
|
||||
_ => None,
|
||||
},
|
||||
failure_count: row.get::<'_, i32, &str>("fail_count") as u64,
|
||||
})
|
||||
}
|
||||
}
|
258 src/repo/postgres_migration.rs (Normal file)
@@ -0,0 +1,258 @@
|
||||
use crate::repo::postgres::PostgresPool;
|
||||
use async_trait::async_trait;
|
||||
use sqlx::{Executor, Postgres, Transaction};
|
||||
|
||||
#[async_trait]
|
||||
pub trait Migration {
|
||||
fn serial_number(&self) -> i64;
|
||||
async fn run(&self, tx: &mut Transaction<Postgres>);
|
||||
}
|
||||
|
||||
struct SimpleSqlMigration {
|
||||
pub serial_number: i64,
|
||||
pub sql: Vec<&'static str>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Migration for SimpleSqlMigration {
|
||||
fn serial_number(&self) -> i64 {
|
||||
self.serial_number
|
||||
}
|
||||
|
||||
async fn run(&self, tx: &mut Transaction<Postgres>) {
|
||||
for sql in self.sql.iter() {
|
||||
tx.execute(*sql).await.unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Execute all migrations on the database.
|
||||
pub async fn run_migrations(db: &PostgresPool) -> crate::error::Result<usize> {
|
||||
prepare_migrations_table(db).await;
|
||||
run_migration(m001::migration(), db).await;
|
||||
let m002_result = run_migration(m002::migration(), db).await;
|
||||
if m002_result == MigrationResult::Upgraded {
|
||||
m002::rebuild_tags(db).await?;
|
||||
}
|
||||
run_migration(m003::migration(), db).await;
|
||||
Ok(current_version(db).await as usize)
|
||||
}
|
||||
|
||||
async fn current_version(db: &PostgresPool) -> i64 {
|
||||
sqlx::query_scalar("SELECT max(serial_number) FROM migrations;")
|
||||
.fetch_one(db)
|
||||
.await
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
async fn prepare_migrations_table(db: &PostgresPool) {
|
||||
sqlx::query("CREATE TABLE IF NOT EXISTS migrations (serial_number bigint)")
|
||||
.execute(db)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
// Running a migration was either unnecessary, or completed
|
||||
#[derive(PartialEq, Eq, Debug, Clone)]
|
||||
enum MigrationResult {
|
||||
Upgraded,
|
||||
NotNeeded,
|
||||
}
|
||||
|
||||
async fn run_migration(migration: impl Migration, db: &PostgresPool) -> MigrationResult {
|
||||
let row: i64 =
|
||||
sqlx::query_scalar("SELECT COUNT(*) AS count FROM migrations WHERE serial_number = $1")
|
||||
.bind(migration.serial_number())
|
||||
.fetch_one(db)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
if row > 0 {
|
||||
return MigrationResult::NotNeeded;
|
||||
}
|
||||
|
||||
let mut transaction = db.begin().await.unwrap();
|
||||
migration.run(&mut transaction).await;
|
||||
|
||||
sqlx::query("INSERT INTO migrations VALUES ($1)")
|
||||
.bind(migration.serial_number())
|
||||
.execute(&mut transaction)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
transaction.commit().await.unwrap();
|
||||
MigrationResult::Upgraded
|
||||
}
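With the trait, helper, and run_migration above, adding a later schema change only requires a new module and one extra call from run_migrations. A hypothetical example (m004 is not part of this change, and the DDL is illustrative only):
mod m004 {
    use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};

    pub const VERSION: i64 = 4;

    pub fn migration() -> impl Migration {
        SimpleSqlMigration {
            serial_number: VERSION,
            sql: vec![
                r#"
          -- illustrative DDL only
          CREATE INDEX IF NOT EXISTS tag_name_idx ON tag ("name");
          "#,
            ],
        }
    }
}

// ...and in run_migrations, after the m003 step:
//     run_migration(m004::migration(), db).await;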
|
||||
|
||||
mod m001 {
|
||||
use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
|
||||
|
||||
pub const VERSION: i64 = 1;
|
||||
|
||||
pub fn migration() -> impl Migration {
|
||||
SimpleSqlMigration {
|
||||
serial_number: VERSION,
|
||||
sql: vec![
|
||||
r#"
|
||||
-- Events table
|
||||
CREATE TABLE "event" (
|
||||
id bytea NOT NULL,
|
||||
pub_key bytea NOT NULL,
|
||||
created_at timestamp with time zone NOT NULL,
|
||||
kind integer NOT NULL,
|
||||
"content" bytea NOT NULL,
|
||||
hidden bit(1) NOT NULL DEFAULT 0::bit(1),
|
||||
delegated_by bytea NULL,
|
||||
first_seen timestamp with time zone NOT NULL DEFAULT now(),
|
||||
CONSTRAINT event_pkey PRIMARY KEY (id)
|
||||
);
|
||||
CREATE INDEX event_created_at_idx ON "event" (created_at,kind);
|
||||
CREATE INDEX event_pub_key_idx ON "event" (pub_key);
|
||||
CREATE INDEX event_delegated_by_idx ON "event" (delegated_by);
|
||||
|
||||
-- Tags table
|
||||
CREATE TABLE "tag" (
|
||||
id int8 NOT NULL GENERATED BY DEFAULT AS IDENTITY,
|
||||
event_id bytea NOT NULL,
|
||||
"name" varchar NOT NULL,
|
||||
value bytea NOT NULL,
|
||||
CONSTRAINT tag_fk FOREIGN KEY (event_id) REFERENCES "event"(id) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX tag_event_id_idx ON tag USING btree (event_id, name);
|
||||
CREATE INDEX tag_value_idx ON tag USING btree (value);
|
||||
|
||||
-- NIP-05 Verification table
|
||||
CREATE TABLE "user_verification" (
|
||||
id int8 NOT NULL GENERATED BY DEFAULT AS IDENTITY,
|
||||
event_id bytea NOT NULL,
|
||||
"name" varchar NOT NULL,
|
||||
verified_at timestamptz NULL,
|
||||
failed_at timestamptz NULL,
|
||||
fail_count int4 NULL DEFAULT 0,
|
||||
CONSTRAINT user_verification_pk PRIMARY KEY (id),
|
||||
CONSTRAINT user_verification_fk FOREIGN KEY (event_id) REFERENCES "event"(id) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX user_verification_event_id_idx ON user_verification USING btree (event_id);
|
||||
CREATE INDEX user_verification_name_idx ON user_verification USING btree (name);
|
||||
"#,
|
||||
],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mod m002 {
|
||||
use async_std::stream::StreamExt;
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
use sqlx::Row;
|
||||
use std::time::Instant;
|
||||
use tracing::info;
|
||||
|
||||
use crate::event::{single_char_tagname, Event};
|
||||
use crate::repo::postgres::PostgresPool;
|
||||
use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
|
||||
use crate::utils::is_lower_hex;
|
||||
|
||||
pub const VERSION: i64 = 2;
|
||||
|
||||
pub fn migration() -> impl Migration {
|
||||
SimpleSqlMigration {
|
||||
serial_number: VERSION,
|
||||
sql: vec![
|
||||
r#"
|
||||
-- Add tag value column
|
||||
ALTER TABLE tag ADD COLUMN value_hex bytea;
|
||||
-- Remove not-null constraint
|
||||
ALTER TABLE tag ALTER COLUMN value DROP NOT NULL;
|
||||
-- Add value index
|
||||
CREATE INDEX tag_value_hex_idx ON tag USING btree (value_hex);
|
||||
"#,
|
||||
],
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn rebuild_tags(db: &PostgresPool) -> crate::error::Result<()> {
|
||||
// Check how many events we have to process
|
||||
let start = Instant::now();
|
||||
let mut tx = db.begin().await.unwrap();
|
||||
let mut update_tx = db.begin().await.unwrap();
|
||||
// Clear out table
|
||||
sqlx::query("DELETE FROM tag;")
|
||||
.execute(&mut update_tx)
|
||||
.await?;
|
||||
{
|
||||
let event_count: i64 = sqlx::query_scalar("SELECT COUNT(*) from event;")
|
||||
.fetch_one(&mut tx)
|
||||
.await
|
||||
.unwrap();
|
||||
let bar = ProgressBar::new(event_count.try_into().unwrap())
|
||||
.with_message("rebuilding tags table");
|
||||
bar.set_style(
|
||||
ProgressStyle::with_template(
|
||||
"[{elapsed_precise}] {bar:40.white/blue} {pos:>7}/{len:7} [{percent}%] {msg}",
|
||||
)
|
||||
.unwrap(),
|
||||
);
|
||||
let mut events =
|
||||
sqlx::query("SELECT id, content FROM event ORDER BY id;").fetch(&mut tx);
|
||||
while let Some(row) = events.next().await {
|
||||
bar.inc(1);
|
||||
// get the row id and content
|
||||
let row = row.unwrap();
|
||||
let event_id: Vec<u8> = row.get(0);
|
||||
let event_bytes: Vec<u8> = row.get(1);
|
||||
let event: Event = serde_json::from_str(&String::from_utf8(event_bytes).unwrap())?;
|
||||
|
||||
for t in event.tags.iter().filter(|x| x.len() > 1) {
|
||||
let tagname = t.get(0).unwrap();
|
||||
let tagnamechar_opt = single_char_tagname(tagname);
|
||||
if tagnamechar_opt.is_none() {
|
||||
continue;
|
||||
}
|
||||
// safe because len was > 1
|
||||
let tagval = t.get(1).unwrap();
|
||||
// insert as BLOB if we can restore it losslessly.
|
||||
// this means it needs to be even length and lowercase.
|
||||
if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
|
||||
let q = "INSERT INTO tag (event_id, \"name\", value_hex) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING;";
|
||||
sqlx::query(q)
|
||||
.bind(&event_id)
|
||||
.bind(tagname)
|
||||
.bind(hex::decode(tagval).ok())
|
||||
.execute(&mut update_tx)
|
||||
.await?;
|
||||
} else {
|
||||
let q = "INSERT INTO tag (event_id, \"name\", value) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING;";
|
||||
sqlx::query(q)
|
||||
.bind(&event_id)
|
||||
.bind(tagname)
|
||||
.bind(tagval.as_bytes())
|
||||
.execute(&mut update_tx)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
update_tx.commit().await?;
|
||||
bar.finish();
|
||||
}
|
||||
info!("rebuilt tags in {:?}", start.elapsed());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
mod m003 {
|
||||
use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
|
||||
|
||||
pub const VERSION: i64 = 3;
|
||||
|
||||
pub fn migration() -> impl Migration {
|
||||
SimpleSqlMigration {
|
||||
serial_number: VERSION,
|
||||
sql: vec![
|
||||
r#"
|
||||
-- Add unique constraint on tag
|
||||
ALTER TABLE tag ADD CONSTRAINT unique_constraint_name UNIQUE (event_id, "name", value);
|
||||
"#,
|
||||
],
|
||||
}
|
||||
}
|
||||
}
|
1039 src/repo/sqlite.rs (Normal file; full diff truncated by the viewer)
@@ -10,17 +10,20 @@ use rusqlite::Connection;
|
||||
use std::cmp::Ordering;
|
||||
use std::time::Instant;
|
||||
use tracing::{debug, error, info};
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
|
||||
/// Startup DB Pragmas
|
||||
pub const STARTUP_SQL: &str = r##"
|
||||
PRAGMA main.synchronous = NORMAL;
|
||||
PRAGMA foreign_keys = ON;
|
||||
PRAGMA journal_size_limit = 32768;
|
||||
PRAGMA temp_store = 2; -- use memory, not temp files
|
||||
PRAGMA main.cache_size = 20000; -- 80MB max cache size per conn
|
||||
pragma mmap_size = 17179869184; -- cap mmap at 16GB
|
||||
"##;
|
||||
|
||||
/// Latest database version
|
||||
pub const DB_VERSION: usize = 13;
|
||||
pub const DB_VERSION: usize = 16;
|
||||
|
||||
/// Schema definition
|
||||
const INIT_SQL: &str = formatcp!(
|
||||
@@ -50,27 +53,34 @@ content TEXT NOT NULL -- serialized json of event object
|
||||
-- Event Indexes
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash);
|
||||
CREATE INDEX IF NOT EXISTS author_index ON event(author);
|
||||
CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
|
||||
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
|
||||
CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
|
||||
CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
|
||||
CREATE INDEX IF NOT EXISTS kind_author_index ON event(kind,author);
|
||||
CREATE INDEX IF NOT EXISTS kind_created_at_index ON event(kind,created_at);
|
||||
CREATE INDEX IF NOT EXISTS author_created_at_index ON event(author,created_at);
|
||||
CREATE INDEX IF NOT EXISTS author_kind_index ON event(author,kind);
|
||||
|
||||
-- Tag Table
|
||||
-- Tag values are stored as either a BLOB (if they come in as a
|
||||
-- hex-string), or TEXT otherwise.
|
||||
-- This means that searches need to select the appropriate column.
|
||||
-- We duplicate the kind/created_at to make indexes much more efficient.
|
||||
CREATE TABLE IF NOT EXISTS tag (
|
||||
id INTEGER PRIMARY KEY,
|
||||
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
|
||||
name TEXT, -- the tag name ("p", "e", whatever)
|
||||
value TEXT, -- the tag value, if not hex.
|
||||
value_hex BLOB, -- the tag value, if it can be interpreted as a lowercase hex string.
|
||||
created_at INTEGER NOT NULL, -- when the event was authored
|
||||
kind INTEGER NOT NULL, -- event kind
|
||||
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
|
||||
CREATE INDEX IF NOT EXISTS tag_val_hex_index ON tag(value_hex);
|
||||
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value_hex,value);
|
||||
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value_hex);
|
||||
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value);
|
||||
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value);
|
||||
CREATE INDEX IF NOT EXISTS tag_covering_index ON tag(name,kind,value,created_at,event_id);
|
||||
|
||||
-- NIP-05 User Validation
|
||||
CREATE TABLE IF NOT EXISTS user_verification (
|
||||
@@ -95,7 +105,21 @@ pub fn curr_db_version(conn: &mut Connection) -> Result<usize> {
|
||||
Ok(curr_version)
|
||||
}
|
||||
|
||||
fn mig_init(conn: &mut PooledConnection) -> Result<usize> {
|
||||
/// Determine event count
|
||||
pub fn db_event_count(conn: &mut Connection) -> Result<usize> {
|
||||
let query = "SELECT count(*) FROM event;";
|
||||
let count = conn.query_row(query, [], |row| row.get(0))?;
|
||||
Ok(count)
|
||||
}
|
||||
|
||||
/// Determine tag count
|
||||
pub fn db_tag_count(conn: &mut Connection) -> Result<usize> {
|
||||
let query = "SELECT count(*) FROM tag;";
|
||||
let count = conn.query_row(query, [], |row| row.get(0))?;
|
||||
Ok(count)
|
||||
}
|
||||
|
||||
fn mig_init(conn: &mut PooledConnection) -> usize {
|
||||
match conn.execute_batch(INIT_SQL) {
|
||||
Ok(()) => {
|
||||
info!(
|
||||
@@ -108,11 +132,11 @@ fn mig_init(conn: &mut PooledConnection) -> Result<usize> {
|
||||
panic!("database could not be initialized");
|
||||
}
|
||||
}
|
||||
Ok(DB_VERSION)
|
||||
DB_VERSION
|
||||
}
|
||||
|
||||
/// Upgrade DB to latest version, and execute pragma settings
|
||||
pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
|
||||
pub fn upgrade_db(conn: &mut PooledConnection) -> Result<usize> {
|
||||
// check the version.
|
||||
let mut curr_version = curr_db_version(conn)?;
|
||||
info!("DB version = {:?}", curr_version);
|
||||
@@ -123,11 +147,11 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
|
||||
);
|
||||
debug!(
|
||||
"SQLite max table/blob/text length: {} MB",
|
||||
(conn.limit(Limit::SQLITE_LIMIT_LENGTH) as f64 / (1024 * 1024) as f64).floor()
|
||||
(f64::from(conn.limit(Limit::SQLITE_LIMIT_LENGTH)) / f64::from(1024 * 1024)).floor()
|
||||
);
|
||||
debug!(
|
||||
"SQLite max SQL length: {} MB",
|
||||
(conn.limit(Limit::SQLITE_LIMIT_SQL_LENGTH) as f64 / (1024 * 1024) as f64).floor()
|
||||
(f64::from(conn.limit(Limit::SQLITE_LIMIT_SQL_LENGTH)) / f64::from(1024 * 1024)).floor()
|
||||
);
|
||||
|
||||
match curr_version.cmp(&DB_VERSION) {
|
||||
@@ -135,26 +159,22 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
|
||||
Ordering::Less => {
|
||||
// initialize from scratch
|
||||
if curr_version == 0 {
|
||||
curr_version = mig_init(conn)?;
|
||||
curr_version = mig_init(conn);
|
||||
}
|
||||
// for initialized but out-of-date schemas, proceed to
|
||||
// upgrade sequentially until we are current.
|
||||
if curr_version == 1 {
|
||||
curr_version = mig_1_to_2(conn)?;
|
||||
}
|
||||
|
||||
if curr_version == 2 {
|
||||
curr_version = mig_2_to_3(conn)?;
|
||||
}
|
||||
|
||||
if curr_version == 3 {
|
||||
curr_version = mig_3_to_4(conn)?;
|
||||
}
|
||||
|
||||
if curr_version == 4 {
|
||||
curr_version = mig_4_to_5(conn)?;
|
||||
}
|
||||
|
||||
if curr_version == 5 {
|
||||
curr_version = mig_5_to_6(conn)?;
|
||||
}
|
||||
@@ -179,6 +199,15 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
|
||||
if curr_version == 12 {
|
||||
curr_version = mig_12_to_13(conn)?;
|
||||
}
|
||||
if curr_version == 13 {
|
||||
curr_version = mig_13_to_14(conn)?;
|
||||
}
|
||||
if curr_version == 14 {
|
||||
curr_version = mig_14_to_15(conn)?;
|
||||
}
|
||||
if curr_version == 15 {
|
||||
curr_version = mig_15_to_16(conn)?;
|
||||
}
|
||||
|
||||
if curr_version == DB_VERSION {
|
||||
info!(
|
||||
@@ -189,13 +218,12 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
|
||||
}
|
||||
// Database is current, all is good
|
||||
Ordering::Equal => {
|
||||
debug!("Database version was already current (v{})", DB_VERSION);
|
||||
debug!("Database version was already current (v{DB_VERSION})");
|
||||
}
|
||||
// Database is newer than what this code understands, abort
|
||||
Ordering::Greater => {
|
||||
panic!(
|
||||
"Database version is newer than supported by this executable (v{} > v{})",
|
||||
curr_version, DB_VERSION
|
||||
"Database version is newer than supported by this executable (v{curr_version} > v{DB_VERSION})",
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -203,9 +231,65 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
|
||||
// Setup PRAGMA
|
||||
conn.execute_batch(STARTUP_SQL)?;
|
||||
debug!("SQLite PRAGMA startup completed");
|
||||
Ok(DB_VERSION)
|
||||
}
|
||||
|
||||
pub fn rebuild_tags(conn: &mut PooledConnection) -> Result<()> {
|
||||
// Check how many events we have to process
|
||||
let count = db_event_count(conn)?;
|
||||
let update_each_percent = 0.05;
|
||||
let mut percent_done = 0.0;
|
||||
let mut events_processed = 0;
|
||||
let start = Instant::now();
|
||||
let tx = conn.transaction()?;
|
||||
{
|
||||
// Clear out table
|
||||
tx.execute("DELETE FROM tag;", [])?;
|
||||
let mut stmt = tx.prepare("select id, content from event order by id;")?;
|
||||
let mut tag_rows = stmt.query([])?;
|
||||
while let Some(row) = tag_rows.next()? {
|
||||
if (events_processed as f32)/(count as f32) > percent_done {
|
||||
info!("Tag update {}% complete...", (100.0*percent_done).round());
|
||||
percent_done += update_each_percent;
|
||||
}
|
||||
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
|
||||
let event_id: u64 = row.get(0)?;
|
||||
let event_json: String = row.get(1)?;
|
||||
let event: Event = serde_json::from_str(&event_json)?;
|
||||
// look at each event, and each tag, creating new tag entries if appropriate.
|
||||
for t in event.tags.iter().filter(|x| x.len() > 1) {
|
||||
let tagname = t.get(0).unwrap();
|
||||
let tagnamechar_opt = single_char_tagname(tagname);
|
||||
if tagnamechar_opt.is_none() {
|
||||
continue;
|
||||
}
|
||||
// safe because len was > 1
|
||||
let tagval = t.get(1).unwrap();
|
||||
// insert as BLOB if we can restore it losslessly.
|
||||
// this means it needs to be even length and lowercase.
|
||||
if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
|
||||
tx.execute(
|
||||
"INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
|
||||
params![event_id, tagname, hex::decode(tagval).ok()],
|
||||
)?;
|
||||
} else {
|
||||
// otherwise, insert as text
|
||||
tx.execute(
|
||||
"INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
|
||||
params![event_id, tagname, &tagval],
|
||||
)?;
|
||||
}
|
||||
}
|
||||
events_processed += 1;
|
||||
}
|
||||
}
|
||||
tx.commit()?;
|
||||
info!("rebuilt tags in {:?}", start.elapsed());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
|
||||
//// Migration Scripts
|
||||
|
||||
fn mig_1_to_2(conn: &mut PooledConnection) -> Result<usize> {
|
||||
@@ -337,7 +421,6 @@ fn mig_5_to_6(conn: &mut PooledConnection) -> Result<usize> {
|
||||
let mut stmt = tx.prepare("select id, content from event order by id;")?;
|
||||
let mut tag_rows = stmt.query([])?;
|
||||
while let Some(row) = tag_rows.next()? {
|
||||
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
|
||||
let event_id: u64 = row.get(0)?;
|
||||
let event_json: String = row.get(1)?;
|
||||
let event: Event = serde_json::from_str(&event_json)?;
|
||||
@@ -485,6 +568,7 @@ fn mig_11_to_12(conn: &mut PooledConnection) -> Result<usize> {
|
||||
// Lookup every replaceable event
|
||||
let mut stmt = tx.prepare("select kind,author from event where kind in (0,3,41) or (kind>=10000 and kind<20000) order by id;")?;
|
||||
let mut replaceable_rows = stmt.query([])?;
|
||||
info!("updating replaceable events; this could take awhile...");
|
||||
while let Some(row) = replaceable_rows.next()? {
|
||||
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
|
||||
let event_kind: u64 = row.get(0)?;
|
||||
@@ -524,3 +608,124 @@ PRAGMA user_version = 13;
|
||||
}
|
||||
Ok(13)
|
||||
}
|
||||
|
||||
fn mig_13_to_14(conn: &mut PooledConnection) -> Result<usize> {
|
||||
info!("database schema needs update from 13->14");
|
||||
let upgrade_sql = r##"
|
||||
CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
|
||||
CREATE INDEX IF NOT EXISTS kind_created_at_index ON event(kind,created_at);
|
||||
pragma optimize;
|
||||
PRAGMA user_version = 14;
|
||||
"##;
|
||||
match conn.execute_batch(upgrade_sql) {
|
||||
Ok(()) => {
|
||||
info!("database schema upgraded v13 -> v14");
|
||||
}
|
||||
Err(err) => {
|
||||
error!("update failed: {}", err);
|
||||
panic!("database could not be upgraded");
|
||||
}
|
||||
}
|
||||
Ok(14)
|
||||
}
|
||||
|
||||
fn mig_14_to_15(conn: &mut PooledConnection) -> Result<usize> {
|
||||
info!("database schema needs update from 14->15");
|
||||
let upgrade_sql = r##"
|
||||
CREATE INDEX IF NOT EXISTS author_created_at_index ON event(author,created_at);
|
||||
CREATE INDEX IF NOT EXISTS author_kind_index ON event(author,kind);
|
||||
PRAGMA user_version = 15;
|
||||
"##;
|
||||
match conn.execute_batch(upgrade_sql) {
|
||||
Ok(()) => {
|
||||
info!("database schema upgraded v14 -> v15");
|
||||
}
|
||||
Err(err) => {
|
||||
error!("update failed: {}", err);
|
||||
panic!("database could not be upgraded");
|
||||
}
|
||||
}
|
||||
// clear out hidden events
|
||||
let clear_hidden_sql = r##"DELETE FROM event WHERE HIDDEN=true;"##;
|
||||
info!("removing hidden events; this may take awhile...");
|
||||
match conn.execute_batch(clear_hidden_sql) {
|
||||
Ok(()) => {
|
||||
info!("all hidden events removed");
|
||||
},
|
||||
Err(err) => {
|
||||
error!("delete failed: {}", err);
|
||||
panic!("could not remove hidden events");
|
||||
}
|
||||
}
|
||||
Ok(15)
|
||||
}
|
||||
|
||||
fn mig_15_to_16(conn: &mut PooledConnection) -> Result<usize> {
|
||||
let count = db_event_count(conn)?;
|
||||
info!("database schema needs update from 15->16 (this make take a few minutes)");
|
||||
let upgrade_sql = r##"
|
||||
DROP TABLE tag;
|
||||
CREATE TABLE tag (
|
||||
id INTEGER PRIMARY KEY,
|
||||
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
|
||||
name TEXT, -- the tag name ("p", "e", whatever)
|
||||
value TEXT, -- the tag value, if not hex.
|
||||
created_at INTEGER NOT NULL, -- when the event was authored
|
||||
kind INTEGER NOT NULL, -- event kind
|
||||
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
|
||||
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value);
|
||||
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value);
|
||||
CREATE INDEX IF NOT EXISTS tag_covering_index ON tag(name,kind,value,created_at,event_id);
|
||||
"##;
|
||||
|
||||
let start = Instant::now();
|
||||
let tx = conn.transaction()?;
|
||||
|
||||
let bar = ProgressBar::new(count.try_into().unwrap())
|
||||
.with_message("rebuilding tags table");
|
||||
bar.set_style(
|
||||
ProgressStyle::with_template(
|
||||
"[{elapsed_precise}] {bar:40.white/blue} {pos:>7}/{len:7} [{percent}%] {msg}",
|
||||
)
|
||||
.unwrap(),
|
||||
);
|
||||
{
|
||||
tx.execute_batch(upgrade_sql)?;
|
||||
let mut stmt = tx.prepare("select id, kind, created_at, content from event order by id;")?;
|
||||
let mut tag_rows = stmt.query([])?;
|
||||
let mut count = 0;
|
||||
while let Some(row) = tag_rows.next()? {
|
||||
count += 1;
|
||||
if count%10==0 {
|
||||
bar.inc(10);
|
||||
}
|
||||
let event_id: u64 = row.get(0)?;
|
||||
let kind: u64 = row.get(1)?;
|
||||
let created_at: u64 = row.get(2)?;
|
||||
let event_json: String = row.get(3)?;
|
||||
let event: Event = serde_json::from_str(&event_json)?;
|
||||
// look at each event, and each tag, creating new tag entries if appropriate.
|
||||
for t in event.tags.iter().filter(|x| x.len() > 1) {
|
||||
let tagname = t.get(0).unwrap();
|
||||
let tagnamechar_opt = single_char_tagname(tagname);
|
||||
if tagnamechar_opt.is_none() {
|
||||
continue;
|
||||
}
|
||||
// safe because len was > 1
|
||||
let tagval = t.get(1).unwrap();
|
||||
// otherwise, insert as text
|
||||
tx.execute(
|
||||
"INSERT INTO tag (event_id, name, value, kind, created_at) VALUES (?1, ?2, ?3, ?4, ?5);",
|
||||
params![event_id, tagname, &tagval, kind, created_at],
|
||||
)?;
|
||||
}
|
||||
}
|
||||
tx.execute("PRAGMA user_version = 16;", [])?;
|
||||
}
|
||||
bar.finish();
|
||||
tx.commit()?;
|
||||
info!("database schema upgraded v15 -> v16 in {:?}", start.elapsed());
|
||||
Ok(16)
|
||||
}
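The reason this migration copies kind and created_at onto every tag row (as the schema comment above notes) is so tag_covering_index can answer tag-filtered subscriptions without visiting the event table for the filter itself. A hypothetical query shape that such an index covers; the relay's real SQL is composed dynamically per filter:
// Hypothetical example only: every referenced column appears in
// tag_covering_index(name, kind, value, created_at, event_id).
const TAG_FILTER_SQL_EXAMPLE: &str =
    "SELECT event_id FROM tag \
     WHERE name = ?1 AND value = ?2 AND kind = ?3 AND created_at > ?4";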
|
456 src/server.rs
@@ -3,6 +3,7 @@ use crate::close::Close;
|
||||
use crate::close::CloseCmd;
|
||||
use crate::config::{Settings, VerifiedUsersMode};
|
||||
use crate::conn;
|
||||
use crate::repo::NostrRepo;
|
||||
use crate::db;
|
||||
use crate::db::SubmittedEvent;
|
||||
use crate::error::{Error, Result};
|
||||
@@ -12,6 +13,9 @@ use crate::info::RelayInfo;
|
||||
use crate::nip05;
|
||||
use crate::notice::Notice;
|
||||
use crate::subscription::Subscription;
|
||||
use prometheus::IntCounterVec;
|
||||
use prometheus::IntGauge;
|
||||
use prometheus::{Encoder, Histogram, IntCounter, HistogramOpts, Opts, Registry, TextEncoder};
|
||||
use futures::SinkExt;
|
||||
use futures::StreamExt;
|
||||
use governor::{Jitter, Quota, RateLimiter};
|
||||
@@ -22,10 +26,8 @@ use hyper::upgrade::Upgraded;
|
||||
use hyper::{
|
||||
header, server::conn::AddrStream, upgrade, Body, Request, Response, Server, StatusCode,
|
||||
};
|
||||
use rusqlite::OpenFlags;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use tokio::sync::Mutex;
|
||||
use std::collections::HashMap;
|
||||
use std::convert::Infallible;
|
||||
use std::net::SocketAddr;
|
||||
@@ -40,23 +42,25 @@ use tokio::sync::broadcast::{self, Receiver, Sender};
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::sync::oneshot;
|
||||
use tokio_tungstenite::WebSocketStream;
|
||||
use tracing::*;
|
||||
use tracing::{debug, error, info, trace, warn};
|
||||
use tungstenite::error::CapacityError::MessageTooLong;
|
||||
use tungstenite::error::Error as WsError;
|
||||
use tungstenite::handshake;
|
||||
use tungstenite::protocol::Message;
|
||||
use tungstenite::protocol::WebSocketConfig;
|
||||
|
||||
/// Handle arbitrary HTTP requests, including for WebSocket upgrades.
|
||||
/// Handle arbitrary HTTP requests, including for `WebSocket` upgrades.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn handle_web_request(
|
||||
mut request: Request<Body>,
|
||||
pool: db::SqlitePool,
|
||||
repo: Arc<dyn NostrRepo>,
|
||||
settings: Settings,
|
||||
remote_addr: SocketAddr,
|
||||
broadcast: Sender<Event>,
|
||||
event_tx: tokio::sync::mpsc::Sender<SubmittedEvent>,
|
||||
shutdown: Receiver<()>,
|
||||
safe_to_read: Arc<Mutex<u64>>,
|
||||
registry: Registry,
|
||||
metrics: NostrMetrics,
|
||||
) -> Result<Response<Body>, Infallible> {
|
||||
match (
|
||||
request.uri().path(),
|
||||
@@ -92,7 +96,7 @@ async fn handle_web_request(
|
||||
tokio_tungstenite::tungstenite::protocol::Role::Server,
|
||||
Some(config),
|
||||
)
|
||||
.await;
|
||||
.await;
|
||||
let origin = get_header_string("origin", request.headers());
|
||||
let user_agent = get_header_string("user-agent", request.headers());
|
||||
// determine the remote IP from headers if they exist
|
||||
@@ -111,22 +115,21 @@ async fn handle_web_request(
|
||||
};
|
||||
// spawn a nostr server with our websocket
|
||||
tokio::spawn(nostr_server(
|
||||
pool,
|
||||
repo,
|
||||
client_info,
|
||||
settings,
|
||||
ws_stream,
|
||||
broadcast,
|
||||
event_tx,
|
||||
shutdown,
|
||||
safe_to_read,
|
||||
metrics,
|
||||
));
|
||||
}
|
||||
// todo: trace, don't print...
|
||||
Err(e) => println!(
|
||||
"error when trying to upgrade connection \
|
||||
from address {} to websocket connection. \
|
||||
Error is: {}",
|
||||
remote_addr, e
|
||||
from address {remote_addr} to websocket connection. \
|
||||
Error is: {e}",
|
||||
),
|
||||
}
|
||||
});
|
||||
@@ -136,7 +139,7 @@ async fn handle_web_request(
|
||||
Err(error) => {
|
||||
warn!("websocket response failed");
|
||||
let mut res =
|
||||
Response::new(Body::from(format!("Failed to create websocket: {}", error)));
|
||||
Response::new(Body::from(format!("Failed to create websocket: {error}")));
|
||||
*res.status_mut() = StatusCode::BAD_REQUEST;
|
||||
return Ok(res);
|
||||
}
|
||||
@@ -157,26 +160,38 @@ async fn handle_web_request(
|
||||
let rinfo = RelayInfo::from(settings.info);
|
||||
let b = Body::from(serde_json::to_string_pretty(&rinfo).unwrap());
|
||||
return Ok(Response::builder()
|
||||
.status(200)
|
||||
.header("Content-Type", "application/nostr+json")
|
||||
.header("Access-Control-Allow-Origin", "*")
|
||||
.body(b)
|
||||
.unwrap());
|
||||
.status(200)
|
||||
.header("Content-Type", "application/nostr+json")
|
||||
.header("Access-Control-Allow-Origin", "*")
|
||||
.body(b)
|
||||
.unwrap());
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(Response::builder()
|
||||
.status(200)
|
||||
.header("Content-Type", "text/plain")
|
||||
.body(Body::from("Please use a Nostr client to connect."))
|
||||
.unwrap())
|
||||
.status(200)
|
||||
.header("Content-Type", "text/plain")
|
||||
.body(Body::from("Please use a Nostr client to connect."))
|
||||
.unwrap())
|
||||
}
|
||||
("/metrics", false) => {
|
||||
let mut buffer = vec![];
|
||||
let encoder = TextEncoder::new();
|
||||
let metric_families = registry.gather();
|
||||
encoder.encode(&metric_families, &mut buffer).unwrap();
|
||||
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header("Content-Type", "text/plain")
|
||||
.body(Body::from(buffer))
|
||||
.unwrap())
|
||||
}
|
||||
(_, _) => {
|
||||
//handle any other url
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::NOT_FOUND)
|
||||
.body(Body::from("Nothing here."))
|
||||
.unwrap())
|
||||
.status(StatusCode::NOT_FOUND)
|
||||
.body(Body::from("Nothing here."))
|
||||
.unwrap())
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -184,7 +199,7 @@ async fn handle_web_request(
|
||||
fn get_header_string(header: &str, headers: &HeaderMap) -> Option<String> {
|
||||
headers
|
||||
.get(header)
|
||||
.and_then(|x| x.to_str().ok().map(|x| x.to_string()))
|
||||
.and_then(|x| x.to_str().ok().map(std::string::ToString::to_string))
|
||||
}
|
||||
|
||||
// return on a control-c or internally requested shutdown signal
|
||||
@@ -194,24 +209,98 @@ async fn ctrl_c_or_signal(mut shutdown_signal: Receiver<()>) {
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = shutdown_signal.recv() => {
|
||||
info!("Shutting down webserver as requested");
|
||||
// server shutting down, exit loop
|
||||
break;
|
||||
},
|
||||
_ = tokio::signal::ctrl_c() => {
|
||||
info!("Shutting down webserver due to SIGINT");
|
||||
break;
|
||||
info!("Shutting down webserver as requested");
|
||||
// server shutting down, exit loop
|
||||
break;
|
||||
},
|
||||
_ = tokio::signal::ctrl_c() => {
|
||||
info!("Shutting down webserver due to SIGINT");
|
||||
break;
|
||||
},
|
||||
_ = term_signal.recv() => {
|
||||
info!("Shutting down webserver due to SIGTERM");
|
||||
break;
|
||||
},
|
||||
_ = term_signal.recv() => {
|
||||
info!("Shutting down webserver due to SIGTERM");
|
||||
break;
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn create_metrics() -> (Registry, NostrMetrics) {
|
||||
// setup prometheus registry
|
||||
let registry = Registry::new();
|
||||
|
||||
let query_sub = Histogram::with_opts(HistogramOpts::new(
|
||||
"nostr_query_seconds",
|
||||
"Subscription response times",
|
||||
)).unwrap();
|
||||
let query_db = Histogram::with_opts(HistogramOpts::new(
|
||||
"nostr_filter_seconds",
|
||||
"Filter SQL query times",
|
||||
)).unwrap();
|
||||
let write_events = Histogram::with_opts(HistogramOpts::new(
|
||||
"nostr_events_write_seconds",
|
||||
"Event writing response times",
|
||||
)).unwrap();
|
||||
let sent_events = IntCounterVec::new(
|
||||
Opts::new("nostr_events_sent_total", "Events sent to clients"),
|
||||
vec!["source"].as_slice(),
|
||||
).unwrap();
|
||||
let connections = IntCounter::with_opts(Opts::new(
|
||||
"nostr_connections_total",
|
||||
"New connections",
|
||||
)).unwrap();
|
||||
let db_connections = IntGauge::with_opts(Opts::new(
|
||||
"nostr_db_connections", "Active database connections"
|
||||
)).unwrap();
|
||||
let query_aborts = IntCounterVec::new(
|
||||
Opts::new("nostr_query_abort_total", "Aborted queries"),
|
||||
vec!["reason"].as_slice(),
|
||||
).unwrap();
|
||||
let cmd_req = IntCounter::with_opts(Opts::new(
|
||||
"nostr_cmd_req_total",
|
||||
"REQ commands",
|
||||
)).unwrap();
|
||||
let cmd_event = IntCounter::with_opts(Opts::new(
|
||||
"nostr_cmd_event_total",
|
||||
"EVENT commands",
|
||||
)).unwrap();
|
||||
let cmd_close = IntCounter::with_opts(Opts::new(
|
||||
"nostr_cmd_close_total",
|
||||
"CLOSE commands",
|
||||
)).unwrap();
|
||||
let disconnects = IntCounterVec::new(
|
||||
Opts::new("nostr_disconnects_total", "Client disconnects"),
|
||||
vec!["reason"].as_slice(),
|
||||
).unwrap();
|
||||
registry.register(Box::new(query_sub.clone())).unwrap();
|
||||
registry.register(Box::new(query_db.clone())).unwrap();
|
||||
registry.register(Box::new(write_events.clone())).unwrap();
|
||||
registry.register(Box::new(sent_events.clone())).unwrap();
|
||||
registry.register(Box::new(connections.clone())).unwrap();
|
||||
registry.register(Box::new(db_connections.clone())).unwrap();
|
||||
registry.register(Box::new(query_aborts.clone())).unwrap();
|
||||
registry.register(Box::new(cmd_req.clone())).unwrap();
|
||||
registry.register(Box::new(cmd_event.clone())).unwrap();
|
||||
registry.register(Box::new(cmd_close.clone())).unwrap();
|
||||
registry.register(Box::new(disconnects.clone())).unwrap();
|
||||
let metrics = NostrMetrics {
|
||||
query_sub,
|
||||
query_db,
|
||||
write_events,
|
||||
sent_events,
|
||||
connections,
|
||||
db_connections,
|
||||
disconnects,
|
||||
query_aborts,
|
||||
cmd_req,
|
||||
cmd_event,
|
||||
cmd_close,
|
||||
};
|
||||
(registry,metrics)
|
||||
}
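For context, the collectors built here are driven from the connection and query paths later in this file: a histogram timer around units of work, and plain counter increments elsewhere. A small illustrative sketch (the function name is made up; the field names come from the NostrMetrics struct defined below):
// Illustrative only: how the registered collectors are typically used.
fn record_example(metrics: &NostrMetrics) {
    // Histogram: time a unit of work in seconds.
    let timer = metrics.query_db.start_timer();
    // ... run a filter query ...
    timer.observe_duration();

    // Counters: plain increments; label values select the series.
    metrics.cmd_req.inc();
    metrics.disconnects.with_label_values(&["normal"]).inc();
}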
|
||||
|
||||
/// Start running a Nostr relay server.
|
||||
pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result<(), Error> {
|
||||
pub fn start_server(settings: &Settings, shutdown_rx: MpscReceiver<()>) -> Result<(), Error> {
|
||||
trace!("Config: {:?}", settings);
|
||||
// do some config validation.
|
||||
if !Path::new(&settings.database.data_directory).is_dir() {
|
||||
@@ -254,18 +343,18 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
|
||||
let rt = Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.thread_name_fn(|| {
|
||||
// give each thread a unique numeric name
|
||||
static ATOMIC_ID: std::sync::atomic::AtomicUsize = std::sync::atomic::AtomicUsize::new(0);
|
||||
let id = ATOMIC_ID.fetch_add(1,Ordering::SeqCst);
|
||||
format!("tokio-ws-{}", id)
|
||||
})
|
||||
// limit concurrent SQLite blocking threads
|
||||
// give each thread a unique numeric name
|
||||
static ATOMIC_ID: std::sync::atomic::AtomicUsize = std::sync::atomic::AtomicUsize::new(0);
|
||||
let id = ATOMIC_ID.fetch_add(1,Ordering::SeqCst);
|
||||
format!("tokio-ws-{id}")
|
||||
})
|
||||
// limit concurrent SQLite blocking threads
|
||||
.max_blocking_threads(settings.limits.max_blocking_threads)
|
||||
.on_thread_start(|| {
|
||||
trace!("started new thread: {:?}", std::thread::current().name());
|
||||
})
|
||||
.on_thread_stop(|| {
|
||||
trace!("stopped thread: {:?}", std::thread::current().name());
|
||||
trace!("stopped thread: {:?}", std::thread::current().name());
|
||||
})
|
||||
.build()
|
||||
.unwrap();
|
||||
@@ -274,8 +363,6 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
|
||||
let broadcast_buffer_limit = settings.limits.broadcast_buffer;
|
||||
let persist_buffer_limit = settings.limits.event_persist_buffer;
|
||||
let verified_users_active = settings.verified_users.is_active();
|
||||
let db_min_conn = settings.database.min_conn;
|
||||
let db_max_conn = settings.database.max_conn;
|
||||
let settings = settings.clone();
|
||||
info!("listening on: {}", socket_addr);
|
||||
// all client-submitted valid events are broadcast to every
|
||||
@@ -298,23 +385,28 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
|
||||
// overwhelming this will drop events and won't register
|
||||
// metadata events.
|
||||
let (metadata_tx, metadata_rx) = broadcast::channel::<Event>(4096);
|
||||
// start the database writer thread. Give it a channel for
|
||||
|
||||
let (registry, metrics) = create_metrics();
|
||||
// build a repository for events
|
||||
let repo = db::build_repo(&settings, metrics.clone()).await;
|
||||
// start the database writer task. Give it a channel for
|
||||
// writing events, and for publishing events that have been
|
||||
// written (to all connected clients).
|
||||
db::db_writer(
|
||||
settings.clone(),
|
||||
event_rx,
|
||||
bcast_tx.clone(),
|
||||
metadata_tx.clone(),
|
||||
shutdown_listen,
|
||||
)
|
||||
.await;
|
||||
tokio::task::spawn(
|
||||
db::db_writer(
|
||||
repo.clone(),
|
||||
settings.clone(),
|
||||
event_rx,
|
||||
bcast_tx.clone(),
|
||||
metadata_tx.clone(),
|
||||
shutdown_listen,
|
||||
));
|
||||
info!("db writer created");
|
||||
|
||||
// create a nip-05 verifier thread; if enabled.
|
||||
if settings.verified_users.mode != VerifiedUsersMode::Disabled {
|
||||
let verifier_opt =
|
||||
nip05::Verifier::new(metadata_rx, bcast_tx.clone(), settings.clone());
|
||||
nip05::Verifier::new(repo.clone(), metadata_rx, bcast_tx.clone(), settings.clone());
|
||||
if let Ok(mut v) = verifier_opt {
|
||||
if verified_users_active {
|
||||
tokio::task::spawn(async move {
|
||||
@@ -324,22 +416,6 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
|
||||
}
|
||||
}
|
||||
}
|
||||
// build a connection pool for DB maintenance
|
||||
let maintenance_pool = db::build_pool(
|
||||
"maintenance writer",
|
||||
&settings,
|
||||
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
|
||||
1,
|
||||
2,
|
||||
false,
|
||||
);
|
||||
|
||||
// Create a mutex that will block readers, so that a
|
||||
// checkpoint can be performed quickly.
|
||||
let safe_to_read = Arc::new(Mutex::new(0));
|
||||
|
||||
db::db_optimize_task(maintenance_pool.clone()).await;
|
||||
db::db_checkpoint_task(maintenance_pool, safe_to_read.clone()).await;
|
||||
|
||||
// listen for (external to tokio) shutdown request
|
||||
let controlled_shutdown = invoke_shutdown.clone();
|
||||
@@ -349,10 +425,9 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
|
||||
Ok(()) => {
|
||||
info!("control message requesting shutdown");
|
||||
controlled_shutdown.send(()).ok();
|
||||
}
|
||||
},
|
||||
Err(std::sync::mpsc::RecvError) => {
|
||||
// FIXME: spurious error on startup?
|
||||
debug!("shutdown requestor is disconnected");
|
||||
trace!("shutdown requestor is disconnected (this is normal)");
|
||||
}
|
||||
};
|
||||
});
|
||||
@@ -366,41 +441,34 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
|
||||
info!("shutting down due to SIGINT (main)");
|
||||
ctrl_c_shutdown.send(()).ok();
|
||||
});
|
||||
// build a connection pool for sqlite connections
|
||||
let pool = db::build_pool(
|
||||
"client query",
|
||||
&settings,
|
||||
rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY,
|
||||
db_min_conn,
|
||||
db_max_conn,
|
||||
true,
|
||||
);
|
||||
// spawn a task to check the pool size.
|
||||
let pool_monitor = pool.clone();
|
||||
tokio::spawn(async move {db::monitor_pool("reader", pool_monitor).await;});
|
||||
// spawn a task to check the pool size.
|
||||
//let pool_monitor = pool.clone();
|
||||
//tokio::spawn(async move {db::monitor_pool("reader", pool_monitor).await;});
|
||||
|
||||
// A `Service` is needed for every connection, so this
|
||||
// creates one from our `handle_request` function.
|
||||
let make_svc = make_service_fn(|conn: &AddrStream| {
|
||||
let svc_pool = pool.clone();
|
||||
let repo = repo.clone();
|
||||
let remote_addr = conn.remote_addr();
|
||||
let bcast = bcast_tx.clone();
|
||||
let event = event_tx.clone();
|
||||
let stop = invoke_shutdown.clone();
|
||||
let settings = settings.clone();
|
||||
let safe_to_read = safe_to_read.clone();
|
||||
let registry = registry.clone();
|
||||
let metrics = metrics.clone();
|
||||
async move {
|
||||
// service_fn converts our function into a `Service`
|
||||
Ok::<_, Infallible>(service_fn(move |request: Request<Body>| {
|
||||
handle_web_request(
|
||||
request,
|
||||
svc_pool.clone(),
|
||||
repo.clone(),
|
||||
settings.clone(),
|
||||
remote_addr,
|
||||
bcast.clone(),
|
||||
event.clone(),
|
||||
stop.subscribe(),
|
||||
safe_to_read.clone(),
|
||||
registry.clone(),
|
||||
metrics.clone(),
|
||||
)
|
||||
}))
|
||||
}
|
||||
@@ -410,7 +478,7 @@ pub fn start_server(settings: Settings, shutdown_rx: MpscReceiver<()>) -> Result
|
||||
.with_graceful_shutdown(ctrl_c_or_signal(webserver_shutdown_listen));
|
||||
// run hyper in this thread. This is why the thread does not return.
|
||||
if let Err(e) = server.await {
|
||||
eprintln!("server error: {}", e);
|
||||
eprintln!("server error: {e}");
|
||||
}
|
||||
});
|
||||
Ok(())
|
||||
@@ -428,11 +496,15 @@ pub enum NostrMessage {
|
||||
CloseMsg(CloseCmd),
|
||||
}
|
||||
|
||||
/// Convert Message to NostrMessage
|
||||
fn convert_to_msg(msg: String, max_bytes: Option<usize>) -> Result<NostrMessage> {
|
||||
let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
|
||||
/// Convert Message to `NostrMessage`
|
||||
fn convert_to_msg(msg: &str, max_bytes: Option<usize>) -> Result<NostrMessage> {
|
||||
let parsed_res: Result<NostrMessage> = serde_json::from_str(msg).map_err(std::convert::Into::into);
|
||||
match parsed_res {
|
||||
Ok(m) => {
|
||||
if let NostrMessage::SubMsg(_) = m {
|
||||
// note: this only prints the first 16k of a REQ and then truncates.
|
||||
trace!("REQ: {:?}",msg);
|
||||
};
|
||||
if let NostrMessage::EventMsg(_) = m {
|
||||
if let Some(max_size) = max_bytes {
|
||||
// check length, ensure that some max size is set.
|
||||
@@ -444,15 +516,15 @@ fn convert_to_msg(msg: String, max_bytes: Option<usize>) -> Result<NostrMessage>
|
||||
Ok(m)
|
||||
}
|
||||
Err(e) => {
|
||||
debug!("proto parse error: {:?}", e);
|
||||
debug!("parse error on message: {}", msg.trim());
|
||||
trace!("proto parse error: {:?}", e);
|
||||
trace!("parse error on message: {:?}", msg.trim());
|
||||
Err(Error::ProtoParseError)
|
||||
}
|
||||
}
|
||||
}
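Note the ordering above: the message is parsed first, and max_event_bytes is enforced only when the parse produced an EVENT, so oversized REQ or CLOSE frames surface as protocol errors rather than size errors. A self-contained toy sketch of that rule (the relay's real NostrMessage and error types are richer):
// Toy sketch only: enforce a size limit only for event-type messages,
// mirroring the EventMsg-only check in convert_to_msg above.
fn check_size(raw: &str, is_event: bool, max_bytes: Option<usize>) -> Result<(), String> {
    if is_event {
        if let Some(max) = max_bytes {
            if raw.len() > max {
                return Err(format!("event too large ({} > {})", raw.len(), max));
            }
        }
    }
    Ok(())
}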
|
||||
|
||||
/// Turn a string into a NOTICE message ready to send over a WebSocket
|
||||
fn make_notice_message(notice: Notice) -> Message {
|
||||
/// Turn a string into a NOTICE message ready to send over a `WebSocket`
|
||||
fn make_notice_message(notice: &Notice) -> Message {
|
||||
let json = match notice {
|
||||
Notice::Message(ref msg) => json!(["NOTICE", msg]),
|
||||
Notice::EventResult(ref res) => json!(["OK", res.id, res.status.to_bool(), res.msg]),
|
||||
@@ -469,15 +541,16 @@ struct ClientInfo {
|
||||
|
||||
/// Handle new client connections. This runs through an event loop
|
||||
/// for all client communication.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn nostr_server(
|
||||
pool: db::SqlitePool,
|
||||
repo: Arc<dyn NostrRepo>,
|
||||
client_info: ClientInfo,
|
||||
settings: Settings,
|
||||
mut ws_stream: WebSocketStream<Upgraded>,
|
||||
broadcast: Sender<Event>,
|
||||
event_tx: mpsc::Sender<SubmittedEvent>,
|
||||
mut shutdown: Receiver<()>,
|
||||
safe_to_read: Arc<Mutex<u64>>,
|
||||
metrics: NostrMetrics,
|
||||
) {
|
||||
// the time this websocket nostr server started
|
||||
let orig_start = Instant::now();
|
||||
@@ -504,7 +577,7 @@ async fn nostr_server(
|
||||
// we will send out the tx handle to any query we generate.
|
||||
// this has capacity for some of the larger requests we see, which
|
||||
// should allow the DB thread to release the handle earlier.
|
||||
let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(20000);
|
||||
let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(20_000);
|
||||
// Create channel for receiving NOTICEs
|
||||
let (notice_tx, mut notice_rx) = mpsc::channel::<Notice>(128);
|
||||
|
||||
@@ -528,19 +601,26 @@ async fn nostr_server(
|
||||
// and how many it received from queries.
|
||||
let mut client_published_event_count: usize = 0;
|
||||
let mut client_received_event_count: usize = 0;
|
||||
debug!("new client connection (cid: {}, ip: {:?})", cid, conn.ip());
|
||||
let origin = client_info.origin.unwrap_or_else(|| "<unspecified>".into());
|
||||
|
||||
let unspec = "<unspecified>".to_string();
|
||||
info!("new client connection (cid: {}, ip: {:?})", cid, conn.ip());
|
||||
let origin = client_info.origin.as_ref().unwrap_or_else(|| &unspec);
|
||||
let user_agent = client_info
|
||||
.user_agent
|
||||
.unwrap_or_else(|| "<unspecified>".into());
|
||||
debug!(
|
||||
.user_agent.as_ref()
|
||||
.unwrap_or_else(|| &unspec);
|
||||
info!(
|
||||
"cid: {}, origin: {:?}, user-agent: {:?}",
|
||||
cid, origin, user_agent
|
||||
);
|
||||
|
||||
// Measure connections
|
||||
metrics.connections.inc();
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = shutdown.recv() => {
|
||||
info!("Close connection down due to shutdown, client: {}, ip: {:?}, connected: {:?}", cid, conn.ip(), orig_start.elapsed());
|
||||
metrics.disconnects.with_label_values(&["shutdown"]).inc();
|
||||
info!("Close connection down due to shutdown, client: {}, ip: {:?}, connected: {:?}", cid, conn.ip(), orig_start.elapsed());
|
||||
// server shutting down, exit loop
|
||||
break;
|
||||
},
|
||||
@@ -549,22 +629,24 @@ async fn nostr_server(
|
||||
// if it has been too long, disconnect
|
||||
if last_message_time.elapsed() > max_quiet_time {
|
||||
debug!("ending connection due to lack of client ping response");
|
||||
metrics.disconnects.with_label_values(&["timeout"]).inc();
|
||||
break;
|
||||
}
|
||||
// Send a ping
|
||||
ws_stream.send(Message::Ping(Vec::new())).await.ok();
|
||||
},
|
||||
Some(notice_msg) = notice_rx.recv() => {
|
||||
ws_stream.send(make_notice_message(notice_msg)).await.ok();
|
||||
ws_stream.send(make_notice_message(¬ice_msg)).await.ok();
|
||||
},
|
||||
Some(query_result) = query_rx.recv() => {
|
||||
// database informed us of a query result we asked for
|
||||
let subesc = query_result.sub_id.replace('"', "");
|
||||
if query_result.event == "EOSE" {
|
||||
let send_str = format!("[\"EOSE\",\"{}\"]", subesc);
|
||||
let send_str = format!("[\"EOSE\",\"{subesc}\"]");
|
||||
ws_stream.send(Message::Text(send_str)).await.ok();
|
||||
} else {
|
||||
client_received_event_count += 1;
|
||||
metrics.sent_events.with_label_values(&["db"]).inc();
|
||||
// send a result
|
||||
let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
|
||||
ws_stream.send(Message::Text(send_str)).await.ok();
|
||||
@@ -578,7 +660,6 @@ async fn nostr_server(
|
||||
if !sub.interested_in_event(&global_event) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// TODO: serialize at broadcast time, instead of
|
||||
// once for each consumer.
|
||||
if let Ok(event_str) = serde_json::to_string(&global_event) {
|
||||
@@ -587,7 +668,8 @@ async fn nostr_server(
|
||||
global_event.get_event_id_prefix());
|
||||
// create an event response and send it
|
||||
let subesc = s.replace('"', "");
|
||||
ws_stream.send(Message::Text(format!("[\"EVENT\",\"{}\",{}]", subesc, event_str))).await.ok();
|
||||
metrics.sent_events.with_label_values(&["realtime"]).inc();
|
||||
ws_stream.send(Message::Text(format!("[\"EVENT\",\"{subesc}\",{event_str}]"))).await.ok();
|
||||
} else {
|
||||
warn!("could not serialize event: {:?}", global_event.get_event_id_prefix());
|
||||
}
|
||||
@@ -599,11 +681,11 @@ async fn nostr_server(
|
||||
// Consume text messages from the client, parse into Nostr messages.
|
||||
let nostr_msg = match ws_next {
|
||||
Some(Ok(Message::Text(m))) => {
|
||||
convert_to_msg(m,settings.limits.max_event_bytes)
|
||||
convert_to_msg(&m,settings.limits.max_event_bytes)
|
||||
},
|
||||
Some(Ok(Message::Binary(_))) => {
|
||||
ws_stream.send(
|
||||
make_notice_message(Notice::message("binary messages are not accepted".into()))).await.ok();
|
||||
Some(Ok(Message::Binary(_))) => {
|
||||
ws_stream.send(
|
||||
make_notice_message(&Notice::message("binary messages are not accepted".into()))).await.ok();
|
||||
continue;
|
||||
},
|
||||
Some(Ok(Message::Ping(_) | Message::Pong(_))) => {
|
||||
@@ -611,27 +693,32 @@ async fn nostr_server(
|
||||
// send responses automatically.
|
||||
continue;
|
||||
},
|
||||
Some(Err(WsError::Capacity(MessageTooLong{size, max_size}))) => {
|
||||
ws_stream.send(
|
||||
make_notice_message(Notice::message(format!("message too large ({} > {})",size, max_size)))).await.ok();
|
||||
Some(Err(WsError::Capacity(MessageTooLong{size, max_size}))) => {
|
||||
ws_stream.send(
|
||||
make_notice_message(&Notice::message(format!("message too large ({size} > {max_size})")))).await.ok();
|
||||
continue;
|
||||
},
|
||||
},
|
||||
None |
|
||||
Some(Ok(Message::Close(_)) |
|
||||
Err(WsError::AlreadyClosed | WsError::ConnectionClosed |
|
||||
WsError::Protocol(tungstenite::error::ProtocolError::ResetWithoutClosingHandshake)))
|
||||
Some(Ok(Message::Close(_)) |
|
||||
Err(WsError::AlreadyClosed | WsError::ConnectionClosed |
|
||||
WsError::Protocol(tungstenite::error::ProtocolError::ResetWithoutClosingHandshake)))
|
||||
=> {
|
||||
debug!("websocket close from client (cid: {}, ip: {:?})",cid, conn.ip());
|
||||
break;
|
||||
},
|
||||
metrics.disconnects.with_label_values(&["normal"]).inc();
|
||||
break;
|
||||
},
|
||||
Some(Err(WsError::Io(e))) => {
|
||||
// IO errors are considered fatal
|
||||
warn!("IO error (cid: {}, ip: {:?}): {:?}", cid, conn.ip(), e);
|
||||
metrics.disconnects.with_label_values(&["error"]).inc();
|
||||
|
||||
break;
|
||||
}
|
||||
x => {
|
||||
// default condition on error is to close the client connection
|
||||
info!("unknown error (cid: {}, ip: {:?}): {:?} (closing conn)", cid, conn.ip(), x);
|
||||
metrics.disconnects.with_label_values(&["error"]).inc();
|
||||
|
||||
break;
|
||||
}
|
||||
};
|
||||
@@ -643,28 +730,29 @@ async fn nostr_server(
|
||||
// handle each type of message
|
||||
let evid = ec.event_id().to_owned();
|
||||
let parsed : Result<Event> = Result::<Event>::from(ec);
|
||||
metrics.cmd_event.inc();
|
||||
match parsed {
|
||||
Ok(e) => {
|
||||
let id_prefix:String = e.id.chars().take(8).collect();
|
||||
debug!("successfully parsed/validated event: {:?} (cid: {})", id_prefix, cid);
|
||||
debug!("successfully parsed/validated event: {:?} (cid: {}, kind: {})", id_prefix, cid, e.kind);
|
||||
// check if the event is too far in the future.
|
||||
if e.is_valid_timestamp(settings.options.reject_future_seconds) {
|
||||
// Write this to the database.
|
||||
let submit_event = SubmittedEvent { event: e.clone(), notice_tx: notice_tx.clone() };
|
||||
let submit_event = SubmittedEvent { event: e.clone(), notice_tx: notice_tx.clone(), source_ip: conn.ip().to_string(), origin: client_info.origin.clone(), user_agent: client_info.user_agent.clone()};
|
||||
event_tx.send(submit_event).await.ok();
|
||||
client_published_event_count += 1;
|
||||
} else {
|
||||
info!("client: {} sent a far future-dated event", cid);
|
||||
if let Some(fut_sec) = settings.options.reject_future_seconds {
|
||||
let msg = format!("The event created_at field is out of the acceptable range (+{}sec) for this relay.",fut_sec);
|
||||
let notice = Notice::invalid(e.id, &msg);
|
||||
ws_stream.send(make_notice_message(notice)).await.ok();
|
||||
}
|
||||
}
|
||||
} else {
|
||||
info!("client: {} sent a far future-dated event", cid);
|
||||
if let Some(fut_sec) = settings.options.reject_future_seconds {
|
||||
let msg = format!("The event created_at field is out of the acceptable range (+{fut_sec}sec) for this relay.");
|
||||
let notice = Notice::invalid(e.id, &msg);
|
||||
ws_stream.send(make_notice_message(¬ice)).await.ok();
|
||||
}
|
||||
}
|
||||
},
|
||||
Err(e) => {
|
||||
info!("client sent an invalid event (cid: {})", cid);
|
||||
ws_stream.send(make_notice_message(Notice::invalid(evid, &format!("{}", e)))).await.ok();
|
||||
ws_stream.send(make_notice_message(&Notice::invalid(evid, &format!("{e}")))).await.ok();
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -675,61 +763,63 @@ async fn nostr_server(
// * registering the subscription so future events can be matched
// * making a channel to cancel to request later
// * sending a request for a SQL query
-// Do nothing if the sub already exists.
-if !conn.has_subscription(&s) {
-if let Some(ref lim) = sub_lim_opt {
-lim.until_ready_with_jitter(jitter).await;
-}
+// Do nothing if the sub already exists.
+if conn.has_subscription(&s) {
+info!("client sent duplicate subscription, ignoring (cid: {}, sub: {:?})", cid, s.id);
+} else {
+metrics.cmd_req.inc();
+if let Some(ref lim) = sub_lim_opt {
+lim.until_ready_with_jitter(jitter).await;
+}
let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
match conn.subscribe(s.clone()) {
-Ok(()) => {
+Ok(()) => {
// when we insert, if there was a previous query running with the same name, cancel it.
-if let Some(previous_query) = running_queries.insert(s.id.to_owned(), abandon_query_tx) {
-previous_query.send(()).ok();
+if let Some(previous_query) = running_queries.insert(s.id.clone(), abandon_query_tx) {
+previous_query.send(()).ok();
}
-if s.needs_historical_events() {
-// start a database query. this spawns a blocking database query on a worker thread.
-db::db_query(s, cid.to_owned(), pool.clone(), query_tx.clone(), abandon_query_rx,safe_to_read.clone()).await;
-}
-},
-Err(e) => {
-info!("Subscription error: {} (cid: {}, sub: {:?})", e, cid, s.id);
-ws_stream.send(make_notice_message(Notice::message(format!("Subscription error: {}", e)))).await.ok();
-}
+if s.needs_historical_events() {
+// start a database query. this spawns a blocking database query on a worker thread.
+repo.query_subscription(s, cid.clone(), query_tx.clone(), abandon_query_rx).await.ok();
+}
+},
+Err(e) => {
+info!("Subscription error: {} (cid: {}, sub: {:?})", e, cid, s.id);
+ws_stream.send(make_notice_message(&Notice::message(format!("Subscription error: {e}")))).await.ok();
+}
}
-} else {
-info!("client sent duplicate subscription, ignoring (cid: {}, sub: {:?})", cid, s.id);
-}
}
},
Ok(NostrMessage::CloseMsg(cc)) => {
// closing a request simply removes the subscription.
let parsed : Result<Close> = Result::<Close>::from(cc);
-if let Ok(c) = parsed {
-// check if a query is currently
-// running, and remove it if so.
-let stop_tx = running_queries.remove(&c.id);
-if let Some(tx) = stop_tx {
-tx.send(()).ok();
-}
-// stop checking new events against
-// the subscription
-conn.unsubscribe(&c);
-} else {
-info!("invalid command ignored");
-ws_stream.send(make_notice_message(Notice::message("could not parse command".into()))).await.ok();
+if let Ok(c) = parsed {
+metrics.cmd_close.inc();
+// check if a query is currently
+// running, and remove it if so.
+let stop_tx = running_queries.remove(&c.id);
+if let Some(tx) = stop_tx {
+tx.send(()).ok();
+}
+// stop checking new events against
+// the subscription
+conn.unsubscribe(&c);
+} else {
+info!("invalid command ignored");
+ws_stream.send(make_notice_message(&Notice::message("could not parse command".into()))).await.ok();
}
},
Err(Error::ConnError) => {
debug!("got connection close/error, disconnecting cid: {}, ip: {:?}",cid, conn.ip());
break;
}
Err(Error::EventMaxLengthError(s)) => {
-info!("client sent event larger ({} bytes) than max size (cid: {})", s, cid);
-ws_stream.send(make_notice_message(Notice::message("event exceeded max size".into()))).await.ok();
+info!("client sent command larger ({} bytes) than max size (cid: {})", s, cid);
+ws_stream.send(make_notice_message(&Notice::message("event exceeded max size".into()))).await.ok();
},
Err(Error::ProtoParseError) => {
-info!("client sent event that could not be parsed (cid: {})", cid);
-ws_stream.send(make_notice_message(Notice::message("could not parse command".into()))).await.ok();
+info!("client sent command that could not be parsed (cid: {})", cid);
+ws_stream.send(make_notice_message(&Notice::message("could not parse command".into()))).await.ok();
},
Err(e) => {
info!("got non-fatal error from client (cid: {}, error: {:?}", cid, e);
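Note on the REQ handling above: each subscription id keeps at most one outstanding database query; inserting a new oneshot sender into `running_queries` returns the previous sender, which is fired to abandon the older query. A minimal, standalone sketch of that cancel-on-replace pattern (the names and timings here are illustrative, not the relay's actual types):

```rust
use std::collections::HashMap;
use tokio::sync::oneshot;
use tokio::time::{sleep, Duration};

#[tokio::main]
async fn main() {
    // subscription id -> sender used to abandon the query running for it
    let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();

    // Start two "queries" under the same subscription id; inserting the second
    // sender returns the first one, which we fire to abandon the older query.
    for n in 0..2 {
        let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
        tokio::spawn(async move {
            tokio::select! {
                _ = abandon_query_rx => println!("query {n} abandoned"),
                _ = sleep(Duration::from_secs(2)) => println!("query {n} finished"),
            }
        });
        if let Some(previous_query) = running_queries.insert("sub-1".to_string(), abandon_query_tx) {
            previous_query.send(()).ok();
        }
    }
    // expect: "query 0 abandoned", then "query 1 finished"
    sleep(Duration::from_secs(3)).await;
}
```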
@@ -748,6 +838,22 @@ async fn nostr_server(
conn.ip(),
client_published_event_count,
client_received_event_count,
-orig_start.elapsed()
+orig_start.elapsed()
);
}
+
+#[derive(Clone)]
+pub struct NostrMetrics {
+pub query_sub: Histogram, // response time of successful subscriptions
+pub query_db: Histogram, // individual database query execution time
+pub db_connections: IntGauge, // database connections in use
+pub write_events: Histogram, // response time of event writes
+pub sent_events: IntCounterVec, // count of events sent to clients
+pub connections: IntCounter, // count of websocket connections
+pub disconnects: IntCounterVec, // client disconnects
+pub query_aborts: IntCounterVec, // count of queries aborted by server
+pub cmd_req: IntCounter, // count of REQ commands received
+pub cmd_event: IntCounter, // count of EVENT commands received
+pub cmd_close: IntCounter, // count of CLOSE commands received
+
+}
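The `NostrMetrics` fields are all types from the `prometheus` crate. One plausible way such metrics could be constructed and registered is sketched below; the metric names and label sets are assumptions for illustration, not the relay's actual wiring:

```rust
use prometheus::{Histogram, HistogramOpts, IntCounter, IntCounterVec, Opts, Registry};

// Hypothetical constructor; the relay's real setup may differ.
fn build_metrics(registry: &Registry) -> prometheus::Result<(Histogram, IntCounter, IntCounterVec)> {
    let query_sub = Histogram::with_opts(HistogramOpts::new(
        "nostr_query_seconds",
        "Response time of successful subscriptions",
    ))?;
    let cmd_req = IntCounter::new("nostr_cmd_req_total", "REQ commands received")?;
    let sent_events = IntCounterVec::new(
        Opts::new("nostr_events_sent_total", "Events sent to clients"),
        &["source"],
    )?;
    // Registering hands a clone of each collector to the registry for scraping.
    registry.register(Box::new(query_sub.clone()))?;
    registry.register(Box::new(cmd_req.clone()))?;
    registry.register(Box::new(sent_events.clone()))?;
    Ok((query_sub, cmd_req, sent_events))
}

fn main() -> prometheus::Result<()> {
    let registry = Registry::new();
    let (query_sub, cmd_req, _sent_events) = build_metrics(&registry)?;
    cmd_req.inc();
    query_sub.observe(0.002);
    Ok(())
}
```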
@@ -2,7 +2,8 @@
use crate::error::Result;
use crate::event::Event;
use serde::de::Unexpected;
-use serde::{Deserialize, Deserializer, Serialize};
+use serde::ser::SerializeMap;
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_json::Value;
use std::collections::HashMap;
use std::collections::HashSet;
@@ -19,7 +20,7 @@ pub struct Subscription {
/// Corresponds to client-provided subscription request elements. Any
/// element can be present if it should be used in filtering, or
/// absent ([`None`]) if it should be ignored.
-#[derive(Serialize, PartialEq, Eq, Debug, Clone)]
+#[derive(PartialEq, Eq, Debug, Clone)]
pub struct ReqFilter {
/// Event hashes
pub ids: Option<Vec<String>>,
@@ -34,7 +35,6 @@ pub struct ReqFilter {
/// Limit number of results
pub limit: Option<u64>,
/// Set of tags
-#[serde(skip)]
pub tags: Option<HashMap<char, HashSet<String>>>,
/// Force no matches due to malformed data
// we can't represent it in the req filter, so we don't want to
@@ -43,6 +43,40 @@ pub struct ReqFilter {
pub force_no_match: bool,
}
+
+impl Serialize for ReqFilter {
+fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+where S:Serializer,
+{
+let mut map = serializer.serialize_map(None)?;
+if let Some(ids) = &self.ids {
+map.serialize_entry("ids", &ids)?;
+}
+if let Some(kinds) = &self.kinds {
+map.serialize_entry("kinds", &kinds)?;
+}
+if let Some(until) = &self.until {
+map.serialize_entry("until", until)?;
+}
+if let Some(since) = &self.since {
+map.serialize_entry("since", since)?;
+}
+if let Some(limit) = &self.limit {
+map.serialize_entry("limit", limit)?;
+}
+if let Some(authors) = &self.authors {
+map.serialize_entry("authors", &authors)?;
+}
+// serialize tags
+if let Some(tags) = &self.tags {
+for (k,v) in tags {
+let vals:Vec<&String> = v.iter().collect();
+map.serialize_entry(&format!("#{k}"), &vals)?;
+}
+}
+map.end()
+}
+}

impl<'de> Deserialize<'de> for ReqFilter {
fn deserialize<D>(deserializer: D) -> Result<ReqFilter, D::Error>
where
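The hand-written `Serialize` impl above exists because the `tags` map must come back out as NIP-01 style `"#e"` / `"#p"` keys, which the previous `#[derive(Serialize)]` with `#[serde(skip)]` could not express. A reduced, standalone illustration of the same `serialize_map` technique, using a hypothetical `TagFilter` type:

```rust
use std::collections::{HashMap, HashSet};
use serde::ser::{Serialize, SerializeMap, Serializer};

struct TagFilter {
    tags: HashMap<char, HashSet<String>>,
}

impl Serialize for TagFilter {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // Emit each tag set under a "#<char>" key, as a JSON array of strings.
        let mut map = serializer.serialize_map(None)?;
        for (k, v) in &self.tags {
            let vals: Vec<&String> = v.iter().collect();
            map.serialize_entry(&format!("#{k}"), &vals)?;
        }
        map.end()
    }
}

fn main() {
    let mut tags = HashMap::new();
    tags.insert('e', HashSet::from(["foo".to_string()]));
    let json = serde_json::to_string(&TagFilter { tags }).unwrap();
    assert_eq!(json, r##"{"#e":["foo"]}"##);
    println!("{json}");
}
```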
@@ -65,21 +99,21 @@ impl<'de> Deserialize<'de> for ReqFilter {
tags: None,
force_no_match: false,
};
-let empty_string = "".into();
+let empty_string = "".into();
let mut ts = None;
// iterate through each key, and assign values that exist
-for (key, val) in filter.into_iter() {
+for (key, val) in filter {
// ids
if key == "ids" {
-let raw_ids: Option<Vec<String>>= Deserialize::deserialize(val).ok();
-if let Some(a) = raw_ids.as_ref() {
-if a.contains(&empty_string) {
-return Err(serde::de::Error::invalid_type(
-Unexpected::Other("prefix matches must not be empty strings"),
-&"a json object"));
-}
-}
-rf.ids =raw_ids;
+let raw_ids: Option<Vec<String>>= Deserialize::deserialize(val).ok();
+if let Some(a) = raw_ids.as_ref() {
+if a.contains(&empty_string) {
+return Err(serde::de::Error::invalid_type(
+Unexpected::Other("prefix matches must not be empty strings"),
+&"a json object"));
+}
+}
+rf.ids =raw_ids;
} else if key == "kinds" {
rf.kinds = Deserialize::deserialize(val).ok();
} else if key == "since" {
@@ -90,14 +124,14 @@ impl<'de> Deserialize<'de> for ReqFilter {
rf.limit = Deserialize::deserialize(val).ok();
} else if key == "authors" {
let raw_authors: Option<Vec<String>>= Deserialize::deserialize(val).ok();
-if let Some(a) = raw_authors.as_ref() {
-if a.contains(&empty_string) {
-return Err(serde::de::Error::invalid_type(
-Unexpected::Other("prefix matches must not be empty strings"),
-&"a json object"));
-}
-}
-rf.authors = raw_authors;
+if let Some(a) = raw_authors.as_ref() {
+if a.contains(&empty_string) {
+return Err(serde::de::Error::invalid_type(
+Unexpected::Other("prefix matches must not be empty strings"),
+&"a json object"));
+}
+}
+rf.authors = raw_authors;
} else if key.starts_with('#') && key.len() > 1 && val.is_array() {
if let Some(tag_search) = tag_search_char_from_filter(key) {
if ts.is_none() {
@@ -107,7 +141,7 @@ impl<'de> Deserialize<'de> for ReqFilter {
if let Some(m) = ts.as_mut() {
let tag_vals: Option<Vec<String>> = Deserialize::deserialize(val).ok();
if let Some(v) = tag_vals {
-let hs = HashSet::from_iter(v.into_iter());
+let hs = v.into_iter().collect::<HashSet<_>>();
m.insert(tag_search.to_owned(), hs);
}
};
@@ -188,6 +222,7 @@ impl<'de> Deserialize<'de> for Subscription {
// create indexes
filters.push(f);
}
+filters.dedup();
Ok(Subscription {
id: sub_id.to_owned(),
filters,
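`filters.dedup()` removes consecutive equal filters only; the duplicate-REQ tests added further below send identical adjacent filters, so they collapse to one, while non-adjacent duplicates would survive. Standard-library behaviour, for reference:

```rust
fn main() {
    // Vec::dedup removes consecutive repeated elements only.
    let mut adjacent = vec![1, 1, 1, 2];
    adjacent.dedup();
    assert_eq!(adjacent, vec![1, 2]);

    let mut interleaved = vec![1, 2, 1];
    interleaved.dedup();
    assert_eq!(interleaved, vec![1, 2, 1]); // the non-adjacent duplicate remains
}
```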
@@ -197,20 +232,20 @@

impl Subscription {
/// Get a copy of the subscription identifier.
-pub fn get_id(&self) -> String {
+#[must_use] pub fn get_id(&self) -> String {
self.id.clone()
}

/// Determine if any filter is requesting historical (database)
/// queries. If every filter has limit:0, we do not need to query the DB.
-pub fn needs_historical_events(&self) -> bool {
-self.filters.iter().any(|f| f.limit!=Some(0))
+#[must_use] pub fn needs_historical_events(&self) -> bool {
+self.filters.iter().any(|f| f.limit!=Some(0))
}

/// Determine if this subscription matches a given [`Event`]. Any
/// individual filter match is sufficient.
-pub fn interested_in_event(&self, event: &Event) -> bool {
-for f in self.filters.iter() {
+#[must_use] pub fn interested_in_event(&self, event: &Event) -> bool {
+for f in &self.filters {
if f.interested_in_event(event) {
return true;
}
@@ -233,23 +268,20 @@ impl ReqFilter {
fn ids_match(&self, event: &Event) -> bool {
self.ids
.as_ref()
-.map(|vs| prefix_match(vs, &event.id))
-.unwrap_or(true)
+.map_or(true, |vs| prefix_match(vs, &event.id))
}

fn authors_match(&self, event: &Event) -> bool {
self.authors
.as_ref()
-.map(|vs| prefix_match(vs, &event.pubkey))
-.unwrap_or(true)
+.map_or(true, |vs| prefix_match(vs, &event.pubkey))
}

fn delegated_authors_match(&self, event: &Event) -> bool {
if let Some(delegated_pubkey) = &event.delegated_by {
self.authors
.as_ref()
-.map(|vs| prefix_match(vs, delegated_pubkey))
-.unwrap_or(true)
+.map_or(true, |vs| prefix_match(vs, delegated_pubkey))
} else {
false
}
@@ -275,16 +307,15 @@ impl ReqFilter {
fn kind_match(&self, kind: u64) -> bool {
self.kinds
.as_ref()
-.map(|ks| ks.contains(&kind))
-.unwrap_or(true)
+.map_or(true, |ks| ks.contains(&kind))
}

/// Determine if all populated fields in this filter match the provided event.
-pub fn interested_in_event(&self, event: &Event) -> bool {
+#[must_use] pub fn interested_in_event(&self, event: &Event) -> bool {
// self.id.as_ref().map(|v| v == &event.id).unwrap_or(true)
self.ids_match(event)
-&& self.since.map(|t| event.created_at > t).unwrap_or(true)
-&& self.until.map(|t| event.created_at < t).unwrap_or(true)
+&& self.since.map_or(true, |t| event.created_at > t)
+&& self.until.map_or(true, |t| event.created_at < t)
&& self.kind_match(event.kind)
&& (self.authors_match(event) || self.delegated_authors_match(event))
&& self.tag_match(event)
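The `.map(..).unwrap_or(true)` chains are rewritten as `Option::map_or`, the form clippy's `map_unwrap_or` lint suggests; the behaviour is unchanged, an absent filter field still matches every event. A small equivalence check:

```rust
fn main() {
    let since: Option<u64> = None;
    let created_at = 100u64;
    // With no `since` in the filter, both forms treat the event as matching.
    assert_eq!(
        since.map(|t| created_at > t).unwrap_or(true),
        since.map_or(true, |t| created_at > t)
    );

    let since = Some(200u64);
    // With a bound present, the closure decides.
    assert!(!since.map_or(true, |t| created_at > t));
}
```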
@@ -320,19 +351,19 @@ mod tests {

#[test]
fn req_empty_authors_prefix() {
-let raw_json = "[\"REQ\",\"some-id\",{\"authors\": [\"\"]}]";
+let raw_json = "[\"REQ\",\"some-id\",{\"authors\": [\"\"]}]";
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}

#[test]
fn req_empty_ids_prefix() {
-let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\"]}]";
+let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\"]}]";
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}

#[test]
fn req_empty_ids_prefix_mixed() {
-let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\",\"aaa\"]}]";
+let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\",\"aaa\"]}]";
assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
}

@@ -343,6 +374,23 @@ mod tests {
assert!(serde_json::from_str::<Subscription>(raw_json).is_ok());
}

+#[test]
+fn dupe_filter() -> Result<()> {
+let raw_json = r#"["REQ","some-id",{"kinds": [1984]}, {"kinds": [1984]}]"#;
+let s: Subscription = serde_json::from_str(raw_json)?;
+assert_eq!(s.filters.len(), 1);
+Ok(())
+}
+
+#[test]
+fn dupe_filter_many() -> Result<()> {
+// duplicate filters in different order
+let raw_json = r#"["REQ","some-id",{"kinds":[1984]},{"kinds":[1984]},{"kinds":[1984]},{"kinds":[1984]}]"#;
+let s: Subscription = serde_json::from_str(raw_json)?;
+assert_eq!(s.filters.len(), 1);
+Ok(())
+}
+
#[test]
fn author_filter() -> Result<()> {
let raw_json = r#"["REQ","some-id",{"authors": ["test-author-id"]}]"#;
@@ -574,4 +622,22 @@ mod tests {
assert!(!s.interested_in_event(&e));
Ok(())
}
+
+#[test]
+fn serialize_filter() -> Result<()> {
+let s: Subscription = serde_json::from_str(r##"["REQ","xyz",{"authors":["abc", "bcd"], "since": 10, "until": 20, "limit":100, "#e": ["foo", "bar"], "#d": ["test"]}]"##)?;
+let f = s.filters.get(0);
+let serialized = serde_json::to_string(&f)?;
+let serialized_wrapped = format!(r##"["REQ", "xyz",{}]"##, serialized);
+let parsed: Subscription = serde_json::from_str(&serialized_wrapped)?;
+let parsed_filter = parsed.filters.get(0);
+if let Some(pf) = parsed_filter {
+assert_eq!(pf.since, Some(10));
+assert_eq!(pf.until, Some(20));
+assert_eq!(pf.limit, Some(100));
+} else {
+assert!(false, "filter could not be parsed");
+}
+Ok(())
+}
}

35
src/utils.rs
@@ -1,8 +1,9 @@
//! Common utility functions
+use bech32::FromBase32;
use std::time::SystemTime;

/// Seconds since 1970.
-pub fn unix_time() -> u64 {
+#[must_use] pub fn unix_time() -> u64 {
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.map(|x| x.as_secs())
@@ -10,12 +11,23 @@ pub fn unix_time() -> u64 {
}

/// Check if a string contains only hex characters.
-pub fn is_hex(s: &str) -> bool {
+#[must_use] pub fn is_hex(s: &str) -> bool {
s.chars().all(|x| char::is_ascii_hexdigit(&x))
}

+/// Check if string is a nip19 string
+pub fn is_nip19(s: &str) -> bool {
+s.starts_with("npub") || s.starts_with("note")
+}
+
+pub fn nip19_to_hex(s: &str) -> Result<String, bech32::Error> {
+let (_hrp, data, _checksum) = bech32::decode(s)?;
+let data = Vec::<u8>::from_base32(&data)?;
+Ok(hex::encode(data))
+}
+
/// Check if a string contains only lower-case hex chars.
-pub fn is_lower_hex(s: &str) -> bool {
+#[must_use] pub fn is_lower_hex(s: &str) -> bool {
s.chars().all(|x| {
(char::is_ascii_lowercase(&x) || char::is_ascii_digit(&x)) && char::is_ascii_hexdigit(&x)
})
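With `is_nip19` and `nip19_to_hex`, bech32 `npub`/`note` identifiers can be converted to the hex form the rest of the relay works with. A sketch of how a caller might normalise a value before filtering; the `normalize_pubkey` wrapper is hypothetical (how the relay itself wires these helpers is not shown in this diff, and the import assumes `utils` is a public module), while the expected hex value is taken from the tests below:

```rust
use nostr_rs_relay::utils::{is_nip19, nip19_to_hex};

// Convert an npub/note string to lowercase hex, passing hex values through untouched.
fn normalize_pubkey(s: &str) -> Option<String> {
    if is_nip19(s) {
        nip19_to_hex(s).ok()
    } else {
        Some(s.to_string())
    }
}

fn main() {
    let npub = "npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6";
    assert_eq!(
        normalize_pubkey(npub).as_deref(),
        Some("3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d")
    );
}
```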
@@ -30,4 +42,21 @@ mod tests {
let hexstr = "abcd0123";
assert_eq!(is_lower_hex(hexstr), true);
}
+
+#[test]
+fn nip19() {
+let hexkey = "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d";
+let nip19key = "npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6";
+assert_eq!(is_nip19(hexkey), false);
+assert_eq!(is_nip19(nip19key), true);
+}
+
+#[test]
+fn nip19_hex() {
+let nip19key = "npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6";
+let expected = "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d";
+let got = nip19_to_hex(nip19key).unwrap();
+
+assert_eq!(expected, got);
+}
}

10
tests/cli.rs
Normal file
@@ -0,0 +1,10 @@
+#[cfg(test)]
+mod tests {
+use nostr_rs_relay::cli::CLIArgs;
+
+#[test]
+fn cli_tests() {
+use clap::CommandFactory;
+CLIArgs::command().debug_assert();
+}
+}
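`Command::debug_assert()` asks clap to validate the whole argument definition at test time (duplicate flags, conflicting requirements) instead of at first parse. A self-contained version of the same check, assuming clap 4's derive API and a hypothetical `Args` struct in place of the relay's actual `CLIArgs`:

```rust
use clap::{CommandFactory, Parser};

/// Hypothetical stand-in for the relay's CLIArgs.
#[derive(Parser, Debug)]
#[command(about = "example CLI", version)]
struct Args {
    /// Path to a config file
    #[arg(short, long)]
    config: Option<String>,
}

#[test]
fn cli_definition_is_consistent() {
    // Panics if the derived argument definition is internally inconsistent.
    Args::command().debug_assert();
}
```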
@@ -36,9 +36,9 @@ pub fn start_relay() -> Result<Relay> {
settings.database.min_conn = 4;
settings.database.max_conn = 8;
let (shutdown_tx, shutdown_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
-let handle = thread::spawn(|| {
+let handle = thread::spawn(move || {
// server will block the thread it is run on.
-let _ = start_server(settings, shutdown_rx);
+let _ = start_server(&settings, shutdown_rx);
});
// how do we know the relay has finished starting up?
Ok(Relay {