Compare commits

...

67 Commits
0.3.0 ... 0.5.2

Author SHA1 Message Date
Greg Heartsfield
048199e30b build: bump version to 0.5.2 2022-02-26 11:22:16 -06:00
Greg Heartsfield
414e83f696 refactor: import cleanup for config 2022-02-26 11:16:12 -06:00
Greg Heartsfield
225c8f762e improvement: upgrade dependencies; config, tungstenite, tokio 2022-02-26 09:55:12 -06:00
Greg Heartsfield
887fc28ab2 fix: until filters in subscriptions now used 2022-02-26 09:15:45 -06:00
Greg Heartsfield
294d3b99c3 fix: correct imports for test cases 2022-02-26 09:07:07 -06:00
Greg Heartsfield
53990672ae improvement: move db pool operations closer to query, do not panic on failure 2022-02-23 16:38:16 -06:00
Greg Heartsfield
9c1b21cbfe improvement: more granular perf logging for SQL queries 2022-02-21 09:03:05 -06:00
Greg Heartsfield
2f63417646 improvement: better logging for connection resets 2022-02-21 08:57:07 -06:00
Greg Heartsfield
3b25160852 fix: abort on connection IO errors 2022-02-21 08:50:46 -06:00
Greg Heartsfield
34ad549cde fix: update event buffer size comment in config 2022-02-20 11:46:24 -06:00
Greg Heartsfield
f8b1fe5035 docs: line up comments with code 2022-02-17 16:18:05 -06:00
Greg Heartsfield
f2001dc34a build: bump version to 0.5.1 2022-02-13 09:38:45 -06:00
Greg Heartsfield
b593001229 fix: remove setting from example config 2022-02-13 09:37:05 -06:00
Greg Heartsfield
5913b9f87a feat: send notices when authorization checks fail 2022-02-13 09:35:54 -06:00
Greg Heartsfield
77f35f9f43 feat: server-side pings and disconnects 2022-02-12 16:57:26 -06:00
Greg Heartsfield
9e06cc9482 improvement: better error messages on parse failures 2022-02-12 16:33:29 -06:00
Greg Heartsfield
e66fa4ac42 refactor: remove unnecessary Option wrapping 2022-02-12 16:29:27 -06:00
Greg Heartsfield
99e117f620 improvement: better handling of out-of-protocol messages 2022-02-12 16:26:55 -06:00
Greg Heartsfield
8250e00f05 fix: remove protostream module, and missing NOTICE 2022-02-12 16:22:12 -06:00
Greg Heartsfield
c9f87ec563 docs: NIP-05 feature note in README 2022-02-12 16:19:46 -06:00
Greg Heartsfield
ceaa01e8b4 fix: removed manual nostr stream, so websocket pings work 2022-02-12 16:19:10 -06:00
Greg Heartsfield
bc68cd0c74 build: bump version to 0.5.0 2022-02-12 14:10:44 -06:00
Greg Heartsfield
97589006fa improvement: upgrade dependencies 2022-02-12 14:10:03 -06:00
Greg Heartsfield
e31d0729f2 chore: comment cleanup 2022-02-12 13:49:52 -06:00
Greg Heartsfield
89d96e7ccd improvement: upgraded database schema to drop legacy tables
Database schema is upgraded to version 5.  Legacy event and pubkey
tables are dropped, and indexes are added for NIP-05 verification.
2022-02-12 13:47:03 -06:00
Greg Heartsfield
7056aae227 refactor: create schema module 2022-02-12 09:58:42 -06:00
Greg Heartsfield
753df47443 refactor: create utils/hexrange utility modules 2022-02-12 09:29:38 -06:00
Greg Heartsfield
26a0ce2b32 docs: function/struct comments 2022-02-12 09:29:35 -06:00
Greg Heartsfield
fa66a0265e docs: module headers 2022-02-12 09:29:31 -06:00
Greg Heartsfield
234a8ba0ac feat: limit event publishing to NIP-05 verified users
This adds a new configurable feature to restrict event publishing to
only users with NIP-05 verified metadata.  Domains can be whitelisted
or blacklisted.  Verification expiration and schedules are
configurable.

This upgrades the database to add a table for tracking verification
records.
2022-02-12 09:29:25 -06:00
Greg Heartsfield
f679fa0893 build: bump version to 0.4.2 2022-01-30 15:19:41 -06:00
Greg Heartsfield
4cc313fa2d fix: cleanup database connections with same name
When a large number of subscriptions is created with identical names,
we do not send a signal over the abandon-read channel.  This
eventually leads to resource exhaustion.
2022-01-30 15:14:02 -06:00
Greg Heartsfield
6502f7dcd7 fix: do not panic when validating events with malformed pubkeys 2022-01-29 13:19:34 -06:00
Greg Heartsfield
6ca3e3ffea build: bump version to 0.4.1 2022-01-26 21:48:44 -06:00
Greg Heartsfield
49c668a07c improvement: upgrade dependency (h2) 2022-01-26 21:48:11 -06:00
Greg Heartsfield
98c6fa6f39 feat: allow whitelisting of pubkeys for new events
This adds a configuration option, `authorization.pubkey_whitelist`
which is an array of pubkeys that are allowed to publish events on
this relay.
2022-01-26 21:39:03 -06:00
Greg Heartsfield
452bbbb0e5 docs: update feature list (NIP-12, prefix search) 2022-01-26 07:24:04 -06:00
Greg Heartsfield
ee0de6f875 improvement: clearer and less verbose database logging 2022-01-25 21:42:43 -06:00
Greg Heartsfield
699489ebaf build: bump version to 0.4.0 2022-01-25 20:56:00 -06:00
Greg Heartsfield
af9da65f71 improvement: upgrade dependencies 2022-01-25 20:55:29 -06:00
Greg Heartsfield
a72eaec3b8 fix: never display hidden events 2022-01-25 20:48:46 -06:00
Greg Heartsfield
f1206e76f2 feat: database reader connection pooling
Added connection pooling for queries, as well as basic configuration
options for min/max connections.
2022-01-25 20:39:24 -06:00
Greg Heartsfield
af453548ee feat: allow author and event id prefix search
This is an experimental non-NIP feature that allows a subscription
filter to include a prefix for authors and events.
2022-01-25 18:23:08 -06:00
Greg Heartsfield
df251c821c docs: updated discord invite link 2022-01-25 07:43:15 -06:00
Greg Heartsfield
2d28a95ff7 feat: allow arbitrary tag queries
This is an experimental feature, outside of any NIP, that demonstrates
generic tag queries.

Instead of limiting subscription filters to just querying only "e" or
"p" tags (via `#e` or `#p` attributes), any tag can be queried.

As an example, consider an event which uses a tag "url".  With this
modification, a subscription filter could add a top-level field
"#url", with an array of strings as the key.  Exact matches would be
returned.

A NIP is forthcoming to formalize this.
2022-01-22 21:29:15 -06:00
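
To make the filter shape concrete, here is a minimal sketch of a `REQ` message using the hypothetical "url" tag described in the commit above (the subscription id and tag value are invented for illustration):

```rust
use serde_json::json;

fn main() {
    // A subscription filter with a generic "#url" tag query.  An event
    // carrying a ["url", "https://example.com/"] tag would be an exact
    // match for this subscription.
    let req = json!([
        "REQ",
        "url-demo",
        { "#url": ["https://example.com/"] }
    ]);
    println!("{}", req);
}
```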
Greg Heartsfield
8c93ef5bc2 docs: provide public docker hub link 2022-01-20 22:02:42 -06:00
Greg Heartsfield
1c0fc1326d docs: add timeout for reverse-proxy example 2022-01-19 21:19:12 -06:00
Raj
179928378e refactor: add strictly typed tags
* Add custom error variant

This can be useful to propagate errors that do not conform to the
available variants, and to convert other errors into `crate::Error`
without an explicit conversion defined, via `error.to_string()`.

* Implement `Tag` and define protocol serialization

A Tag structure has been implemented with dedicated field types, and
custom serde serialization is derived to map the structure to the
current protocol JSON array as per NIP-01.

This adds compile- and run-time type checking to ensure wrong string
data is never stored or processed.  With strictly typed fields and
custom serde derivation, these checks can be done at serialization
time, saving work for internal handling of the actual data.

Tests for possible data violations are added, and give good examples
of the kinds of errors it will throw in different cases.

* Use String for URL
2022-01-19 07:42:58 -06:00
Raj
c605d75bb4 docs: update readme to include the new discord server 2022-01-17 08:35:13 -06:00
Greg Heartsfield
81e4e2b892 feat: add supported NIPs (2, 11) to relay info 2022-01-16 08:37:21 -06:00
Greg Heartsfield
6f166433b5 fix: test failures 2022-01-16 08:36:52 -06:00
Greg Heartsfield
030b64de62 feat: replace email with contact field in relay info.
This finalizes the NIP-11 spec implementation.

Fixes https://todo.sr.ht/~gheartsfield/nostr-rs-relay/21.
2022-01-16 08:34:19 -06:00
Greg Heartsfield
c7eadb1154 Add feature list to README 2022-01-16 08:16:42 -06:00
Greg Heartsfield
62dc77369d docs: rename example relay server 2022-01-15 11:43:12 -06:00
Greg Heartsfield
24587435ca docs: reverse proxy example 2022-01-15 11:41:31 -06:00
Greg Heartsfield
a3124ccea4 improvement: better sql error handling 2022-01-15 09:42:53 -06:00
Greg Heartsfield
4e51e61d16 improvement: display rate limit messages max once per sec 2022-01-15 09:42:17 -06:00
Raj
5c8390bbe0 fix: fix some test failures 2022-01-14 14:27:12 -06:00
Greg Heartsfield
da7968efef fix: restore working websocket message size configuration options 2022-01-05 17:41:12 -05:00
Greg Heartsfield
7037555516 improvement: add indexed tag queries 2022-01-05 17:33:53 -05:00
Greg Heartsfield
19ed990c57 refactor: fix clippy errors for relay info response 2022-01-05 10:10:44 -05:00
Greg Heartsfield
d78bbfc290 build: bump version to 0.3.3 2022-01-03 22:07:15 -05:00
Greg Heartsfield
2924da88bc feat: incorporated improvements from NIP-11 discussion
Change descr to description.  Add `id` for websocket URL.  Use
integers for supported NIPs instead of strings.  The top level is now
an object, instead of the array used before.
2022-01-03 22:03:30 -05:00
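
A hedged sketch of the relay information document as this commit describes it, using the `serde_json` crate already in the project (field values here are invented; the exact field set is defined by the NIP-11 proposal):

```rust
use serde_json::json;

fn main() {
    // Top-level object, "description" spelled out, an "id" holding the
    // websocket URL, and supported NIPs as integers.
    let relay_info = json!({
        "id": "wss://nostr.example.com/",
        "name": "nostr-rs-relay",
        "description": "A newly created nostr-rs-relay.",
        "supported_nips": [2, 11]
    });
    println!("{}", relay_info);
}
```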
Greg Heartsfield
3024e9fba4 build: bump version to 0.3.2 2022-01-03 18:43:17 -05:00
Greg Heartsfield
d3da4eb009 feat: implementation of proposed NIP-11 (server metadata) 2022-01-03 18:42:24 -05:00
Greg Heartsfield
19637d612e build: bump version to 0.3.1 2022-01-01 19:26:15 -06:00
Greg Heartsfield
afc9a0096a improvement: logging failed queries and timing 2022-01-01 19:25:09 -06:00
21 changed files with 3518 additions and 800 deletions

Cargo.lock (generated): 958 lines changed; diff suppressed because it is too large.

Cargo.toml

@@ -1,26 +1,32 @@
 [package]
 name = "nostr-rs-relay"
-version = "0.3.0"
+version = "0.5.2"
 edition = "2021"

 [dependencies]
 log = "^0.4"
 env_logger = "^0.9"
-tokio = { version = "^1.14", features = ["full"] }
+tokio = { version = "^1.16", features = ["full"] }
 futures = "^0.3"
 futures-util = "^0.3"
-tokio-tungstenite = "^0.16"
-tungstenite = "^0.16"
+tokio-tungstenite = "^0.17"
+tungstenite = "^0.17"
 thiserror = "^1"
 uuid = { version = "^0.8", features = ["v4"] }
-config = { version = "0.11", features = ["toml"] }
-bitcoin_hashes = { version = "^0.9", features = ["serde"] }
-secp256k1 = {git = "https://github.com/rust-bitcoin/rust-secp256k1.git", rev = "50034ccb18fdd84904ab3aa6c84a12fcced33209", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] }
+config = { version = "^0.12", features = ["toml"] }
+bitcoin_hashes = { version = "^0.10", features = ["serde"] }
+secp256k1 = {version = "^0.21", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] }
 serde = { version = "^1.0", features = ["derive"] }
-serde_json = "^1.0"
+serde_json = {version = "^1.0", features = ["preserve_order"]}
 hex = "^0.4"
-rusqlite = "^0.26"
+rusqlite = { version = "^0.26", features = ["limits"]}
+r2d2 = "^0.8"
+r2d2_sqlite = "^0.19"
 lazy_static = "^1.4"
 governor = "^0.4"
 nonzero_ext = "^0.3"
-hyper={ version="0.14", features=["server","http1","http2","tcp"] }
+hyper = { version="0.14", features=["client", "server","http1","http2","tcp"] }
+hyper-tls = "^0.5"
+http = { version = "^0.2" }
+parse_duration = "^2"
+rand = "^0.8"

Dockerfile

@@ -1,4 +1,4 @@
-FROM rust:1.57 as builder
+FROM rust:1.58.1 as builder

 RUN USER=root cargo new --bin nostr-rs-relay
 WORKDIR ./nostr-rs-relay
@@ -12,11 +12,11 @@ COPY ./src ./src
 RUN rm ./target/release/deps/nostr*relay*
 RUN cargo build --release

-FROM debian:buster-slim
+FROM debian:bullseye-20220125-slim
 ARG APP=/usr/src/app
 ARG APP_DATA=/usr/src/app/db
 RUN apt-get update \
-    && apt-get install -y ca-certificates tzdata sqlite3 \
+    && apt-get install -y ca-certificates tzdata sqlite3 libc6 \
     && rm -rf /var/lib/apt/lists/*
 EXPOSE 8080

README.md

@@ -8,6 +8,20 @@ The project master repository is available on
 [sourcehut](https://sr.ht/~gheartsfield/nostr-rs-relay/), and is
 mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay).

+## Features
+
+NIPs with a relay-specific implementation are listed here.
+
+- [x] NIP-01: Core event model
+- [x] NIP-01: Hide old metadata events
+- [x] NIP-01: Id/Author prefix search (_experimental_)
+- [x] NIP-02: Hide old contact list events
+- [ ] NIP-03: OpenTimestamps
+- [x] NIP-05: Mapping Nostr keys to DNS identifiers
+- [ ] NIP-09: Event deletion
+- [x] NIP-11: Relay information document
+- [x] NIP-12: Generic tag search (_experimental_)
+
 ## Quick Start

 The provided `Dockerfile` will compile and build the server
@@ -40,11 +54,11 @@ Text Note [81cf...2652] from 296a...9b92 5 seconds ago
 ```

 A pre-built container is also available on DockerHub:
-https://hub.docker.com/repository/docker/scsibug/nostr-rs-relay
+https://hub.docker.com/r/scsibug/nostr-rs-relay

 ## Configuration

-The sample `[config.toml](config.toml)` file demonstrates the
+The sample [`config.toml`](config.toml) file demonstrates the
 configuration available to the relay.  This file is optional, but may
 be mounted into a docker container like so:
@@ -58,6 +72,16 @@ $ docker run -it -p 7000:8080 \
 Options include rate-limiting, event size limits, and network address
 settings.

+## Reverse Proxy Configuration
+
+For examples of putting the relay behind a reverse proxy (for TLS
+termination, load balancing, and other features), see [Reverse
+Proxy](reverse-proxy.md).
+
+## Dev Channel
+
+The current dev discussions for this project are happening at https://discord.gg/ufG6fH52Vk.
+Drop in to ask any development-related questions.
+
 License
 ---
 This project is MIT licensed.

config.toml

@@ -1,13 +1,39 @@
 # Nostr-rs-relay configuration

+[info]
+# The advertised URL for the Nostr websocket.
+relay_url = "wss://nostr.example.com/"
+
+# Relay information for clients.  Put your unique server name here.
+name = "nostr-rs-relay"
+
+# Description
+description = "A newly created nostr-rs-relay.\n\nCustomize this with your own info."
+
+# Administrative contact pubkey
+#pubkey = "0c2d168a4ae8ca58c9f1ab237b5df682599c6c7ab74307ea8b05684b60405d41"
+
+# Administrative contact URI
+#contact = "mailto:contact@example.com"
+
 [database]
 # Directory for SQLite files.  Defaults to the current directory.  Can
 # also be specified (and overridden) with the "--db dirname" command
 # line option.
 data_directory = "."

+# Database connection pool settings for subscribers:
+
+# Minimum number of SQLite reader connections
+#min_conn = 4
+
+# Maximum number of SQLite reader connections
+#max_conn = 128
+
 [network]
 # Bind to this network address
 address = "0.0.0.0"

 # Listen on this port
 port = 8080
@@ -20,22 +46,57 @@ reject_future_seconds = 1800
 [limits]
 # Limit events created per second, averaged over one minute.  Must be
 # an integer.  If not set (or set to 0), defaults to unlimited.
-messages_per_sec = 0
+#messages_per_sec = 0

 # Limit the maximum size of an EVENT message.  Defaults to 128 KB.
 # Set to 0 for unlimited.
-max_event_bytes = 131072
+#max_event_bytes = 131072

 # Maximum WebSocket message in bytes.  Defaults to 128 KB.
-max_ws_message_bytes = 131072
+#max_ws_message_bytes = 131072

 # Maximum WebSocket frame size in bytes.  Defaults to 128 KB.
-max_ws_frame_bytes = 131072
+#max_ws_frame_bytes = 131072

 # Broadcast buffer size, in number of events.  This prevents slow
-# readers from consuming memory.  Defaults to 4096.
-broadcast_buffer = 4096
+# readers from consuming memory.
+#broadcast_buffer = 16384

 # Event persistence buffer size, in number of events.  This provides
-# backpressure to senders if writes are slow.  Defaults to 16.
-event_persist_buffer = 16
+# backpressure to senders if writes are slow.
+#event_persist_buffer = 4096
+
+[authorization]
+# Pubkey addresses in this array are whitelisted for event publishing.
+# Only valid events by these authors will be accepted, if the variable
+# is set.
+#pubkey_whitelist = [
+#  "35d26e4690cbe1a898af61cc3515661eb5fa763b57bd0b42e45099c8b32fd50f",
+#  "887645fef0ce0c3c1218d2f5d8e6132a19304cdc57cd20281d082f38cfea0072",
+#]
+
+[verified_users]
+# NIP-05 verification of users.  Can be "enabled" to require NIP-05
+# metadata for event authors, "passive" to perform validation but
+# never block publishing, or "disabled" to do nothing.
+#mode = "disabled"
+
+# Domain names that will be prevented from publishing events.
+#domain_blacklist = ["wellorder.net"]
+
+# Domain names that are allowed to publish events.  If defined, only
+# events by NIP-05 verified authors at these domains are persisted.
+#domain_whitelist = ["example.com"]
+
+# Consider a pubkey "verified" if we have a successful validation
+# from the NIP-05 domain within this amount of time.  Note, if the
+# domain provides a successful response that omits the account,
+# verification is immediately revoked.
+#verify_expiration = "1 week"
+
+# How long to wait between verification attempts for a specific author.
+#verify_update_frequency = "24 hours"
+
+# How many consecutive failed checks before we give up on verifying
+# this author.
+#max_consecutive_failures = 20


@@ -0,0 +1,248 @@
# Author Verification Design Document
The relay will use NIP-05 DNS-based author verification to limit which
authors can publish events to a relay. This document describes how
this feature will operate.
## Considerations
DNS-based author verification is designed to be deployed in relays that
want to prevent spam, so there should be strong protections to prevent
unauthorized authors from persisting data. This includes data needed to
verify new authors.
There should be protections in place to ensure the relay cannot be
used to spam or flood other webservers. Additionally, there should be
protections against server-side request forgery (SSRF).
## Design Overview
### Concepts
All authors are initially "unverified". Unverified authors that submit
appropriate `NIP-05` metadata events become "candidates" for
verification. A candidate author becomes verified when the relay
inspects a kind `0` metadata event for the author with a `nip05` field,
and follows the procedure in `NIP-05` to successfully associate the
author with an internet identifier.
The `NIP-05` procedure verifies an author for a fixed period of time,
configurable by the relay operator. If this "verification expiration
time" (`verify_expiration`) is exceeded without being refreshed, they
are once again unverified.
Verified authors have their status regularly and automatically updated
through scheduled polling to their verified domain; this process is
"re-verification". It is performed based on the configuration setting
`verify_update_frequency`, which defines how long the relay waits
between verification attempts (whether the result was success or
failure).
Authors may change their verification data (the internet identifier from
`NIP-05`) with a new metadata event, which then requires
re-verification. Their old verification remains valid until
expiration.
Performing candidate author verification is a best-effort activity and
may be significantly rate-limited to prevent relays being used to
attack other hosts. Candidate verification (untrusted authors) should
never impact re-verification (trusted authors).
## Operating Modes
The relay may operate in one of three modes. "Disabled" performs no
validation activities, and will never permit or deny events based on
an author's NIP-05 metadata. "Passive" performs NIP-05 validation,
but does not permit or deny events based on the validity or presence
of NIP-05 metadata. "Enabled" will require current and valid NIP-05
metadata for any events to be persisted. "Enabled" mode will
additionally consider domain whitelist/blacklist configuration data to
restrict which authors' events are persisted.
## Design Details
### Data Storage
Verification is stored in a dedicated table; a sketch follows this list.  This tracks:
* `nip05` identifier
* most recent verification timestamp
* most recent verification failure timestamp
* reference to the metadata event (used for tracking `created_at` and
`pubkey`)
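
A minimal sketch of such a table, assuming hypothetical table and
column names (the actual schema lives in the repository's migration
SQL and may differ):

```rust
// Hypothetical DDL for the verification table described above.
const CREATE_VERIFICATION_TABLE_SQL: &str = r##"
CREATE TABLE IF NOT EXISTS user_verification (
    id INTEGER PRIMARY KEY,
    metadata_event INTEGER NOT NULL, -- reference to the kind-0 metadata event
    name TEXT NOT NULL,              -- the nip05 identifier (user@domain)
    verified_at INTEGER,             -- most recent successful verification
    failed_at INTEGER,               -- most recent failed attempt
    failure_count INTEGER DEFAULT 0, -- consecutive failures
    FOREIGN KEY(metadata_event) REFERENCES event(id) ON DELETE CASCADE
);
"##;
```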
### Event Handling
All events are first validated to ensure the signature is valid.
Incoming events of kind _other_ than metadata (kind `0`) submitted by
clients will be evaluated as follows.
* If the event's author has a current verification, the event is
persisted as normal.
* If the event's author has either no verification, or the
verification is expired, the event is rejected.
If the event is a metadata event, we handle it differently.
We first determine the verification status of the event's pubkey.
* If the event author is unverified, AND the event contains a `nip05`
key, we consider this a verification candidate.
* If the event author is unverified, AND the event does not contain a
`nip05` key, this is not a candidate, and the event is dropped.
* If the event author is verified, AND the event contains a `nip05`
key that is identical to the currently stored value, no special
action is needed.
* If the event author is verified, AND the event contains a different
`nip05` than was previously verified, with a more recent timestamp,
we need to re-verify.
* If the event author is verified, AND the event is missing a `nip05`
key, and the event timestamp is more recent than what was verified,
we do nothing. The current verification will be allowed to expire.
### Candidate Verification
When a candidate verification is requested, a rate limit will be
utilized. If the rate limit is exceeded, new candidate verification
requests will be dropped. In practice, this is implemented by a
size-limited channel that drops events that exceed a threshold.
Candidates are never persisted in the database.
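
A sketch of the drop-on-overflow behavior using a bounded tokio
channel (the capacity and payload type here are assumptions for
illustration, not the relay's actual settings):

```rust
use tokio::sync::mpsc;

fn sketch() {
    // Bounded channel: at most 16 candidate checks may be pending.
    let (tx, _rx) = mpsc::channel::<String>(16);
    // try_send() fails immediately when the buffer is full, so excess
    // candidates are dropped instead of queueing without bound.
    if tx.try_send("alice@example.com".to_owned()).is_err() {
        // channel full (or closed): the candidate is simply dropped
    }
}
```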
### Re-Verification
Re-verification is straightforward when there has been no change to
the `nip05` key. A new request to the `nip05` domain is performed,
and if successful, the verification timestamp is updated to the
current time. If the request fails due to a timeout or server error,
the failure timestamp is updated instead.
When the `nip05` key has changed and this event is more recent, we
will create a new verification record, and delete all other records
for the same name.
Regarding creating new records vs. updating: We never update the event
reference or `nip05` identifier in a verification record. Every update
resets either the last failure or the last success timestamp.
### Determining Verification Status
In determining if an event is from a verified author, the following
procedure should be used:
Join the verification table with the event table, to provide
verification data alongside the event `created_at` and `pubkey`
metadata. Find the most recent verification record for the author,
based on the `created_at` time.
Reject the record if the success timestamp is not within our
configured expiration time.
Reject records with disallowed domains, based on any whitelists or
blacklists in effect.
If a result remains, the author is treated as verified.
This does give a time window for authors transitioning their verified
status between domains. There may be a period of time in which there
are multiple valid rows in the verification table for a given author.
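
A hedged sketch of that query, reusing the hypothetical names from the
storage sketch above; expiration and domain filtering would then be
applied to the returned row in code:

```rust
// Most recent verification record for an author, newest metadata
// event first.
const LATEST_VERIFICATION_SQL: &str = r##"
SELECT v.name, v.verified_at, v.failed_at, e.created_at
  FROM user_verification v
  LEFT JOIN event e ON e.id = v.metadata_event
 WHERE e.author = ?
 ORDER BY e.created_at DESC, v.verified_at DESC
 LIMIT 1;
"##;
```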
### Cleaning Up Inactive Verifications
After an author verification has expired, we will continue to check for
it to become valid again. After a configurable number of attempts, we
should simply forget it, and reclaim the space.
### Addition of Domain Whitelist/Blacklist
A set of whitelisted or blacklisted domains may be provided. If both
are provided, only the whitelist is used. In this context, domains
are either "allowed" (present on a whitelist and NOT present on a
blacklist), or "denied" (NOT present on a whitelist and present on a
blacklist).
The processes outlined so far are modified in the presence of these
options:
* Only authors with allowed domains can become candidates for
verification.
* Verification status queries additionally filter out any denied
domains.
* Re-verification processes only proceed with allowed domains.
### Integration
We have an existing database writer thread, which receives events and
attempts to persist them to disk. Once validated and persisted, these
events are broadcast to all subscribers.
When verification is enabled, the writer must check to ensure a valid,
unexpired verification record exists for the author.  All metadata
events (regardless of verification status) are forwarded to a verifier
module. If the verifier determines a new verification record is
needed, it is also responsible for persisting and broadcasting the
event, just as the database writer would have done.
## Threat Scenarios
Some of these mitigations are fully implemented, others are documented
simply to demonstrate a mitigation is possible.
### Domain Spamming
*Threat*: An author with a high volume of events creates a metadata event
with a bogus domain, causing the relay to generate significant
unwanted traffic to a target.
*Mitigation*: Rate limiting for all candidate verification will limit
external requests to a reasonable amount. Currently, this is a simple
delay that slows down the HTTP task.
### Denial of Service for Legitimate Authors
*Threat*: An author with a high volume of events creates a metadata event
with a domain that is invalid for them, _but which is used by other
legitimate authors_. This triggers rate-limiting against the legitimate
domain, and blocks authors from updating their own metadata.
*Mitigation*: Rate limiting should only apply to candidates, so any
existing verified authors have priority for re-verification. New
authors will be affected, as we cannot distinguish between the threat
and a legitimate author. _(Unimplemented)_
### Denial of Service by Consuming Storage
*Threat*: An author creates a high volume of random metadata events with
unique domains, in order to cause us to store large amounts of data
for to-be-verified authors.
*Mitigation*: No data is stored for candidate authors. This makes it
harder for new authors to become verified, but is effective at
preventing this attack.
### Metadata Replay for Verified Author
*Threat*: An attacker replays an out-of-date metadata event for an author, to
cause a verification to fail.
*Mitigation*: New metadata events have their signed timestamp compared
against the signed timestamp of the event that has most recently
verified them. If the metadata event is older, it is discarded.
### Server-Side Request Forgery via Metadata
*Threat*: Attacker includes malicious data in the `nip05` event, which
is used to generate HTTP requests against potentially internal
resources, either leaking data or invoking webservices beyond their
own privileges.
*Mitigation*: Consider detecting and dropping when the `nip05` field
is an IP address. Allow the relay operator to utilize the `blacklist`
or `whitelist` to constrain hosts that will be contacted. Most
importantly, the verification process is hardcoded to only make
requests to a known URL path
(`.well-known/nostr.json?name=<LOCAL_NAME>`). The `<LOCAL_NAME>`
component is restricted to a basic ASCII subset (preventing additional
URL components).
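
A sketch of that constraint in Rust (the exact allowed character set
is an assumption; the real check lives in the relay's verifier
module):

```rust
/// Build the only URL the verifier is permitted to fetch, or None if
/// the local name strays outside a basic ASCII subset.
fn nip05_fetch_url(domain: &str, local_name: &str) -> Option<String> {
    let name_ok = !local_name.is_empty()
        && local_name
            .chars()
            .all(|c| c.is_ascii_alphanumeric() || "-_.".contains(c));
    if name_ok {
        // fixed, hardcoded path; only the domain and name vary
        Some(format!(
            "https://{}/.well-known/nostr.json?name={}",
            domain, local_name
        ))
    } else {
        None
    }
}
```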

reverse-proxy.md (new file, 53 lines)

@@ -0,0 +1,53 @@
# Reverse Proxy Setup Guide
It is recommended to run `nostr-rs-relay` behind a reverse proxy such
as `haproxy` or `nginx` to provide TLS termination. A simple example
of an `haproxy` configuration is documented here.
## Minimal HAProxy Configuration
Assumptions:
* HAProxy version is `2.4.10` or greater (older versions not tested).
* Hostname for the relay is `relay.example.com`.
* Your relay should be available over wss://relay.example.com
* Your (NIP-11) relay info page should be available on https://relay.example.com
* SSL certificate is located in `/etc/certs/example.com.pem`.
* Relay is running on port 8080.
* Limit connections to 400 concurrent.
* HSTS (HTTP Strict Transport Security) is desired.
* Only TLS 1.2 or greater is allowed.
```
global
    ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
    ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets

frontend fe_prod
    mode http
    bind :443 ssl crt /etc/certs/example.com.pem alpn h2,http/1.1
    bind :80
    http-request set-header X-Forwarded-Proto https if { ssl_fc }
    redirect scheme https code 301 if !{ ssl_fc }
    acl host_relay hdr(host) -i relay.example.com
    use_backend relay if host_relay
    # HSTS (1 year)
    http-response set-header Strict-Transport-Security max-age=31536000

backend relay
    mode http
    timeout connect 5s
    timeout client 50s
    timeout server 50s
    timeout tunnel 1h
    timeout client-fin 30s
    option tcp-check
    default-server maxconn 400 check inter 20s fastinter 1s
    server relay 127.0.0.1:8080
```
### Notes
You may experience WebSocket connection problems with Firefox if
HTTP/2 is enabled, for older versions of HAProxy (2.3.x). Either
disable HTTP/2 (`h2`), or upgrade HAProxy.

src/close.rs

@@ -1,4 +1,6 @@
 //! Subscription close request parsing
+//!
+//! Representation and parsing of `CLOSE` messages sent from clients.
 use crate::error::{Error, Result};
 use serde::{Deserialize, Serialize};
@@ -11,7 +13,7 @@ pub struct CloseCmd {
     id: String,
 }

-/// Close command parsed
+/// Identifier of the subscription to be closed.
 #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
 pub struct Close {
     /// The subscription identifier being closed.

src/config.rs

@@ -1,17 +1,32 @@
+//! Configuration file and settings management
+use config::{Config, ConfigError, File};
 use lazy_static::lazy_static;
 use log::*;
 use serde::{Deserialize, Serialize};
 use std::sync::RwLock;
+use std::time::Duration;

 // initialize a singleton default configuration
 lazy_static! {
     pub static ref SETTINGS: RwLock<Settings> = RwLock::new(Settings::default());
 }

+#[derive(Debug, Serialize, Deserialize, Clone)]
+#[allow(unused)]
+pub struct Info {
+    pub relay_url: Option<String>,
+    pub name: Option<String>,
+    pub description: Option<String>,
+    pub pubkey: Option<String>,
+    pub contact: Option<String>,
+}
+
 #[derive(Debug, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Database {
     pub data_directory: String,
+    pub min_conn: u32,
+    pub max_conn: u32,
 }

 #[derive(Debug, Serialize, Deserialize)]
@@ -49,39 +64,117 @@ pub struct Limits {
     pub event_persist_buffer: usize, // events to buffer for database commits (block senders if database writes are too slow)
 }

+#[derive(Debug, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct Authorization {
+    pub pubkey_whitelist: Option<Vec<String>>, // If present, only allow these pubkeys to publish events
+}
+
+#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
+#[serde(rename_all = "lowercase")]
+pub enum VerifiedUsersMode {
+    Enabled,
+    Passive,
+    Disabled,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct VerifiedUsers {
+    pub mode: VerifiedUsersMode, // Mode of operation: "enabled" (enforce) or "passive" (check only). If none, this is simply disabled.
+    pub domain_whitelist: Option<Vec<String>>, // If present, only allow verified users from these domains can publish events
+    pub domain_blacklist: Option<Vec<String>>, // If present, allow all verified users from any domain except these
+    pub verify_expiration: Option<String>, // how long a verification is cached for before no longer being used
+    pub verify_update_frequency: Option<String>, // how often to attempt to update verification
+    pub verify_expiration_duration: Option<Duration>, // internal result of parsing verify_expiration
+    pub verify_update_frequency_duration: Option<Duration>, // internal result of parsing verify_update_frequency
+    pub max_consecutive_failures: usize, // maximum number of verification failures in a row, before ceasing future checks
+}
+
+impl VerifiedUsers {
+    pub fn init(&mut self) {
+        self.verify_expiration_duration = self.verify_expiration_duration();
+        self.verify_update_frequency_duration = self.verify_update_duration();
+    }
+
+    pub fn is_enabled(&self) -> bool {
+        self.mode == VerifiedUsersMode::Enabled
+    }
+
+    pub fn is_active(&self) -> bool {
+        self.mode == VerifiedUsersMode::Enabled || self.mode == VerifiedUsersMode::Passive
+    }
+
+    pub fn is_passive(&self) -> bool {
+        self.mode == VerifiedUsersMode::Passive
+    }
+
+    pub fn verify_expiration_duration(&self) -> Option<Duration> {
+        self.verify_expiration
+            .as_ref()
+            .and_then(|x| parse_duration::parse(x).ok())
+    }
+
+    pub fn verify_update_duration(&self) -> Option<Duration> {
+        self.verify_update_frequency
+            .as_ref()
+            .and_then(|x| parse_duration::parse(x).ok())
+    }
+
+    pub fn is_valid(&self) -> bool {
+        self.verify_expiration_duration().is_some() && self.verify_update_duration().is_some()
+    }
+}
+
 #[derive(Debug, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Settings {
+    pub info: Info,
     pub database: Database,
     pub network: Network,
     pub limits: Limits,
+    pub authorization: Authorization,
+    pub verified_users: VerifiedUsers,
     pub retention: Retention,
     pub options: Options,
 }

 impl Settings {
     pub fn new() -> Self {
-        let d = Self::default();
+        let default_settings = Self::default();
         // attempt to construct settings with file
-        // Self::new_from_default(&d).unwrap_or(d)
-        let from_file = Self::new_from_default(&d);
+        let from_file = Self::new_from_default(&default_settings);
         match from_file {
             Ok(f) => f,
             Err(e) => {
                 warn!("Error reading config file ({:?})", e);
-                d
+                default_settings
             }
         }
     }

-    fn new_from_default(default: &Settings) -> Result<Self, config::ConfigError> {
-        let config: config::Config = config::Config::new();
-        let settings: Settings = config
-            // use defaults
-            .with_merged(config::Config::try_from(default).unwrap())?
-            // override with file contents
-            .with_merged(config::File::with_name("config"))?
-            .try_into()?;
+    fn new_from_default(default: &Settings) -> Result<Self, ConfigError> {
+        let builder = Config::builder();
+        let config: Config = builder
+            // use defaults
+            .add_source(Config::try_from(default)?)
+            // override with file contents
+            .add_source(File::with_name("config"))
+            .build()?
+            .try_into()
+            .unwrap();
+        let mut settings: Settings = config.try_deserialize()?;
+        // ensure connection pool size is logical
+        if settings.database.min_conn > settings.database.max_conn {
+            panic!(
+                "Database min_conn setting ({}) cannot exceed max_conn ({})",
+                settings.database.min_conn, settings.database.max_conn
+            );
+        }
+        // ensure durations parse
+        if !settings.verified_users.is_valid() {
+            panic!("VerifiedUsers time settings could not be parsed");
+        }
+        // initialize durations for verified users
+        settings.verified_users.init();
         Ok(settings)
     }
 }
@@ -89,8 +182,17 @@ impl Settings {
 impl Default for Settings {
     fn default() -> Self {
         Settings {
+            info: Info {
+                relay_url: None,
+                name: Some("Unnamed nostr-rs-relay".to_owned()),
+                description: None,
+                pubkey: None,
+                contact: None,
+            },
             database: Database {
                 data_directory: ".".to_owned(),
+                min_conn: 4,
+                max_conn: 128,
             },
             network: Network {
                 port: 8080,
@@ -101,8 +203,21 @@ impl Default for Settings {
                 max_event_bytes: Some(2 << 17),      // 128K
                 max_ws_message_bytes: Some(2 << 17), // 128K
                 max_ws_frame_bytes: Some(2 << 17),   // 128K
-                broadcast_buffer: 4096,
-                event_persist_buffer: 16,
+                broadcast_buffer: 16384,
+                event_persist_buffer: 4096,
             },
+            authorization: Authorization {
+                pubkey_whitelist: None, // Allow any address to publish
+            },
+            verified_users: VerifiedUsers {
+                mode: VerifiedUsersMode::Disabled,
+                domain_whitelist: None,
+                domain_blacklist: None,
+                verify_expiration: Some("1 week".to_owned()),
+                verify_update_frequency: Some("1 day".to_owned()),
+                verify_expiration_duration: None,
+                verify_update_frequency_duration: None,
+                max_consecutive_failures: 20,
+            },
             retention: Retention {
                 max_events: None, // max events

src/db.rs (561 lines changed)

@@ -1,140 +1,122 @@
//! Event persistence and querying //! Event persistence and querying
use crate::config::SETTINGS;
use crate::error::Error;
use crate::error::Result; use crate::error::Result;
use crate::event::Event; use crate::event::Event;
use crate::hexrange::hex_range;
use crate::hexrange::HexSearch;
use crate::nip05;
use crate::schema::{upgrade_db, STARTUP_SQL};
use crate::subscription::Subscription; use crate::subscription::Subscription;
use crate::utils::is_hex;
use governor::clock::Clock; use governor::clock::Clock;
use governor::{Quota, RateLimiter}; use governor::{Quota, RateLimiter};
use hex; use hex;
use log::*; use log::*;
use r2d2;
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::params; use rusqlite::params;
use rusqlite::types::ToSql;
use rusqlite::Connection; use rusqlite::Connection;
use rusqlite::OpenFlags; use rusqlite::OpenFlags;
//use std::num::NonZeroU32;
use crate::config::SETTINGS;
use std::path::Path; use std::path::Path;
use std::thread; use std::thread;
use std::time::Duration;
use std::time::Instant;
use tokio::task; use tokio::task;
pub type SqlitePool = r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>;
pub type PooledConnection = r2d2::PooledConnection<r2d2_sqlite::SqliteConnectionManager>;
/// Events submitted from a client, with a return channel for notices
pub struct SubmittedEvent {
pub event: Event,
pub notice_tx: tokio::sync::mpsc::Sender<String>,
}
/// Database file /// Database file
const DB_FILE: &str = "nostr.db"; pub const DB_FILE: &str = "nostr.db";
/// Startup DB Pragmas /// Build a database connection pool.
const STARTUP_SQL: &str = r##" pub fn build_pool(
PRAGMA main.synchronous=NORMAL; name: &str,
PRAGMA foreign_keys = ON; flags: OpenFlags,
pragma mmap_size = 536870912; -- 512MB of mmap min_size: u32,
"##; max_size: u32,
wait_for_db: bool,
) -> SqlitePool {
let settings = SETTINGS.read().unwrap();
/// Schema definition let db_dir = &settings.database.data_directory;
const INIT_SQL: &str = r##" let full_path = Path::new(db_dir).join(DB_FILE);
-- Database settings // small hack; if the database doesn't exist yet, that means the
PRAGMA encoding = "UTF-8"; // writer thread hasn't finished. Give it a chance to work. This
PRAGMA journal_mode=WAL; // is only an issue with the first time we run.
PRAGMA main.synchronous=NORMAL; while !full_path.exists() && wait_for_db {
PRAGMA foreign_keys = ON; debug!("Database reader pool is waiting on the database to be created...");
PRAGMA application_id = 1654008667; thread::sleep(Duration::from_millis(500));
PRAGMA user_version = 2;
-- Event Table
CREATE TABLE IF NOT EXISTS event (
id INTEGER PRIMARY KEY,
event_hash BLOB NOT NULL, -- 4-byte hash
first_seen INTEGER NOT NULL, -- when the event was first seen (not authored!) (seconds since 1970)
created_at INTEGER NOT NULL, -- when the event was authored
author BLOB NOT NULL, -- author pubkey
kind INTEGER NOT NULL, -- event kind
hidden INTEGER, -- relevant for queries
content TEXT NOT NULL -- serialized json of event object
);
-- Event Indexes
CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash);
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS author_index ON event(author);
CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
-- Event References Table
CREATE TABLE IF NOT EXISTS event_ref (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains an #e tag.
referenced_event BLOB NOT NULL, -- the event that is referenced.
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
-- Event References Index
CREATE INDEX IF NOT EXISTS event_ref_index ON event_ref(referenced_event);
-- Pubkey References Table
CREATE TABLE IF NOT EXISTS pubkey_ref (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains an #p tag.
referenced_pubkey BLOB NOT NULL, -- the pubkey that is referenced.
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE RESTRICT ON DELETE CASCADE
);
-- Pubkey References Index
CREATE INDEX IF NOT EXISTS pubkey_ref_index ON pubkey_ref(referenced_pubkey);
"##;
/// Upgrade DB to latest version, and execute pragma settings
pub fn upgrade_db(conn: &mut Connection) -> Result<()> {
// check the version.
let curr_version = db_version(conn)?;
info!("DB version = {:?}", curr_version);
// initialize from scratch
if curr_version == 0 {
match conn.execute_batch(INIT_SQL) {
Ok(()) => info!("database pragma/schema initialized to v2, and ready"),
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be initialized");
} }
} let manager = SqliteConnectionManager::file(&full_path)
} else if curr_version == 1 { .with_flags(flags)
// only change is adding a hidden column to events. .with_init(|c| c.execute_batch(STARTUP_SQL));
let upgrade_sql = r##" let pool: SqlitePool = r2d2::Pool::builder()
ALTER TABLE event ADD hidden INTEGER; .test_on_check_out(true) // no noticeable performance hit
UPDATE event SET hidden=FALSE; .min_idle(Some(min_size))
PRAGMA user_version = 2; .max_size(max_size)
"##; .build(manager)
match conn.execute_batch(upgrade_sql) { .unwrap();
Ok(()) => info!("database schema upgraded v1 -> v2"), info!(
Err(err) => { "Built a connection pool {:?} (min={}, max={})",
error!("update failed: {}", err); name, min_size, max_size
panic!("database could not be upgraded"); );
} pool
} }
} else if curr_version == 2 {
debug!("Database version was already current"); /// Build a single database connection, with provided flags
} else if curr_version > 2 { pub fn build_conn(flags: OpenFlags) -> Result<Connection> {
panic!("Database version is newer than supported by this executable"); let settings = SETTINGS.read().unwrap();
} let db_dir = &settings.database.data_directory;
// Setup PRAGMA let full_path = Path::new(db_dir).join(DB_FILE);
conn.execute_batch(STARTUP_SQL)?; // create a connection
Ok(()) Ok(Connection::open_with_flags(&full_path, flags)?)
} }
/// Spawn a database writer that persists events to the SQLite store. /// Spawn a database writer that persists events to the SQLite store.
pub async fn db_writer( pub async fn db_writer(
mut event_rx: tokio::sync::mpsc::Receiver<Event>, mut event_rx: tokio::sync::mpsc::Receiver<SubmittedEvent>,
bcast_tx: tokio::sync::broadcast::Sender<Event>, bcast_tx: tokio::sync::broadcast::Sender<Event>,
metadata_tx: tokio::sync::broadcast::Sender<Event>,
mut shutdown: tokio::sync::broadcast::Receiver<()>, mut shutdown: tokio::sync::broadcast::Receiver<()>,
) -> tokio::task::JoinHandle<Result<()>> { ) -> tokio::task::JoinHandle<Result<()>> {
let settings = SETTINGS.read().unwrap();
// are we performing NIP-05 checking?
let nip05_active = settings.verified_users.is_active();
// are we requriing NIP-05 user verification?
let nip05_enabled = settings.verified_users.is_enabled();
task::spawn_blocking(move || { task::spawn_blocking(move || {
// get database configuration settings // get database configuration settings
let config = SETTINGS.read().unwrap(); let settings = SETTINGS.read().unwrap();
let db_dir = &config.database.data_directory; let db_dir = &settings.database.data_directory;
let full_path = Path::new(db_dir).join(DB_FILE); let full_path = Path::new(db_dir).join(DB_FILE);
// create a connection // create a connection pool
let mut conn = Connection::open_with_flags( let pool = build_pool(
&full_path, "event writer",
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE, OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
)?; 1,
4,
false,
);
info!("opened database {:?} for writing", full_path); info!("opened database {:?} for writing", full_path);
upgrade_db(&mut conn)?; upgrade_db(&mut pool.get()?)?;
// Make a copy of the whitelist
let whitelist = &settings.authorization.pubkey_whitelist.clone();
// get rate limit settings // get rate limit settings
let rps_setting = config.limits.messages_per_sec; let rps_setting = settings.limits.messages_per_sec;
let mut most_recent_rate_limit = Instant::now();
let mut lim_opt = None; let mut lim_opt = None;
let clock = governor::clock::QuantaClock::default(); let clock = governor::clock::QuantaClock::default();
if let Some(rps) = rps_setting { if let Some(rps) = rps_setting {
@@ -156,47 +138,132 @@ pub async fn db_writer(
break; break;
} }
let mut event_write = false; let mut event_write = false;
let event = next_event.unwrap(); let subm_event = next_event.unwrap();
match write_event(&mut conn, &event) { let event = subm_event.event;
let notice_tx = subm_event.notice_tx;
// check if this event is authorized.
if let Some(allowed_addrs) = whitelist {
// if the event address is not in allowed_addrs.
if !allowed_addrs.contains(&event.pubkey) {
info!(
"Rejecting event {}, unauthorized author",
event.get_event_id_prefix()
);
notice_tx
.try_send("pubkey is not allowed to publish to this relay".to_owned())
.ok();
continue;
}
}
// send any metadata events to the NIP-05 verifier
if nip05_active && event.is_kind_metadata() {
// we are sending this prior to even deciding if we
// persist it. this allows the nip05 module to
// inspect it, update if necessary, or persist a new
// event and broadcast it itself.
metadata_tx.send(event.clone()).ok();
}
// check for NIP-05 verification
if nip05_enabled {
match nip05::query_latest_user_verification(pool.get()?, event.pubkey.to_owned()) {
Ok(uv) => {
if uv.is_valid() {
info!(
"new event from verified author ({:?},{:?})",
uv.name.to_string(),
event.get_author_prefix()
);
} else {
info!("rejecting event, author ({:?} / {:?}) verification invalid (expired/wrong domain)",
uv.name.to_string(),
event.get_author_prefix()
);
notice_tx
.try_send(
"NIP-05 verification is no longer valid (expired/wrong domain)"
.to_owned(),
)
.ok();
continue;
}
}
Err(Error::SqlError(rusqlite::Error::QueryReturnedNoRows)) => {
debug!(
"no verification records found for pubkey: {:?}",
event.get_author_prefix()
);
notice_tx
.try_send("NIP-05 verification needed to publish events".to_owned())
.ok();
continue;
}
Err(e) => {
warn!("checking nip05 verification status failed: {:?}", e);
continue;
}
}
}
// TODO: cache recent list of authors to remove a DB call.
let start = Instant::now();
match write_event(&mut pool.get()?, &event) {
Ok(updated) => { Ok(updated) => {
if updated == 0 { if updated == 0 {
debug!("ignoring duplicate event"); trace!("ignoring duplicate event");
} else { } else {
info!("persisted event: {}", event.get_event_id_prefix()); info!(
"persisted event {:?} from {:?} in {:?}",
event.get_event_id_prefix(),
event.get_author_prefix(),
start.elapsed()
);
event_write = true; event_write = true;
// send this out to all clients // send this out to all clients
bcast_tx.send(event.clone()).ok(); bcast_tx.send(event.clone()).ok();
} }
} }
Err(err) => { Err(err) => {
warn!("event insert failed: {}", err); warn!("event insert failed: {:?}", err);
notice_tx
.try_send(
"relay experienced an error trying to publish the latest event"
.to_owned(),
)
.ok();
} }
} }
// use rate limit, if defined, and if an event was actually written. // use rate limit, if defined, and if an event was actually written.
if event_write { if event_write {
if let Some(ref lim) = lim_opt { if let Some(ref lim) = lim_opt {
if let Err(n) = lim.check() { if let Err(n) = lim.check() {
info!("Rate limiting event creation"); let wait_for = n.wait_time_from(clock.now());
thread::sleep(n.wait_time_from(clock.now())); // check if we have recently logged rate
// limits, but print out a message only once
// per second.
if most_recent_rate_limit.elapsed().as_secs() > 10 {
warn!(
"rate limit reached for event creation (sleep for {:?}) (suppressing future messages for 10 seconds)",
wait_for
);
// reset last rate limit message
most_recent_rate_limit = Instant::now();
}
// block event writes, allowing them to queue up
thread::sleep(wait_for);
continue; continue;
} }
} }
} }
} }
conn.close().ok();
info!("database connection closed"); info!("database connection closed");
Ok(()) Ok(())
}) })
} }
pub fn db_version(conn: &mut Connection) -> Result<usize> { /// Persist an event to the database, returning rows added.
let query = "PRAGMA user_version;"; pub fn write_event(conn: &mut PooledConnection, e: &Event) -> Result<usize> {
let curr_version = conn.query_row(query, [], |row| row.get(0))?;
Ok(curr_version)
}
/// Persist an event to the database.
pub fn write_event(conn: &mut Connection, e: &Event) -> Result<usize> {
// start transaction // start transaction
let tx = conn.transaction()?; let tx = conn.transaction()?;
// get relevant fields from event and convert to blobs. // get relevant fields from event and convert to blobs.
@@ -215,25 +282,25 @@ pub fn write_event(conn: &mut Connection, e: &Event) -> Result<usize> {
} }
// remember primary key of the event most recently inserted. // remember primary key of the event most recently inserted.
let ev_id = tx.last_insert_rowid(); let ev_id = tx.last_insert_rowid();
// add all event tags into the event_ref table // add all tags to the tag table
let etags = e.get_event_tags(); for tag in e.tags.iter() {
if !etags.is_empty() { // ensure we have 2 values.
for etag in etags.iter() { if tag.len() >= 2 {
let tagname = &tag[0];
let tagval = &tag[1];
// if tagvalue is hex;
if is_hex(tagval) {
tx.execute( tx.execute(
"INSERT OR IGNORE INTO event_ref (event_id, referenced_event) VALUES (?1, ?2)", "INSERT OR IGNORE INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3)",
params![ev_id, hex::decode(&etag).ok()], params![ev_id, &tagname, hex::decode(&tagval).ok()],
)?;
} else {
tx.execute(
"INSERT OR IGNORE INTO tag (event_id, name, value) VALUES (?1, ?2, ?3)",
params![ev_id, &tagname, &tagval],
)?; )?;
} }
} }
// add all event tags into the pubkey_ref table
let ptags = e.get_pubkey_tags();
if !ptags.is_empty() {
for ptag in ptags.iter() {
tx.execute(
"INSERT OR IGNORE INTO pubkey_ref (event_id, referenced_pubkey) VALUES (?1, ?2)",
params![ev_id, hex::decode(&ptag).ok()],
)?;
}
} }
// if this event is for a metadata update, hide every other kind=0 // if this event is for a metadata update, hide every other kind=0
// event from the same author that was issued earlier than this. // event from the same author that was issued earlier than this.
@@ -243,7 +310,11 @@ pub fn write_event(conn: &mut Connection, e: &Event) -> Result<usize> {
params![ev_id, hex::decode(&e.pubkey).ok(), e.created_at], params![ev_id, hex::decode(&e.pubkey).ok(), e.created_at],
)?; )?;
if update_count > 0 { if update_count > 0 {
info!("hid {} older metadata events", update_count); info!(
"hid {} older metadata events for author {:?}",
update_count,
e.get_author_prefix()
);
} }
} }
// if this event is for a contact update, hide every other kind=3 // if this event is for a contact update, hide every other kind=3
@@ -254,14 +325,18 @@ pub fn write_event(conn: &mut Connection, e: &Event) -> Result<usize> {
params![ev_id, hex::decode(&e.pubkey).ok(), e.created_at], params![ev_id, hex::decode(&e.pubkey).ok(), e.created_at],
)?; )?;
if update_count > 0 { if update_count > 0 {
info!("hid {} older contact events", update_count); info!(
"hid {} older contact events for author {:?}",
update_count,
e.get_author_prefix()
);
} }
} }
tx.commit()?; tx.commit()?;
Ok(ins_count) Ok(ins_count)
} }
/// Event resulting from a specific subscription request /// Serialized event associated with a specific subscription request.
#[derive(PartialEq, Debug, Clone)] #[derive(PartialEq, Debug, Clone)]
pub struct QueryResult { pub struct QueryResult {
/// Subscription identifier /// Subscription identifier
@@ -270,35 +345,58 @@ pub struct QueryResult {
pub event: String, pub event: String,
} }
/// Check if a string contains only hex characters. /// Produce a arbitrary list of '?' parameters.
fn is_hex(s: &str) -> bool { fn repeat_vars(count: usize) -> String {
s.chars().all(|x| char::is_ascii_hexdigit(&x)) if count == 0 {
return "".to_owned();
}
let mut s = "?,".repeat(count);
// Remove trailing comma
s.pop();
s
} }
/// Create a dynamic SQL query string from a subscription. /// Create a dynamic SQL query string and params from a subscription.
fn query_from_sub(sub: &Subscription) -> String { fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
// build a dynamic SQL query. all user-input is either an integer // build a dynamic SQL query. all user-input is either an integer
// (sqli-safe), or a string that is filtered to only contain // (sqli-safe), or a string that is filtered to only contain
// hexadecimal characters. // hexadecimal characters. Strings that require escaping (tag
// names/values) use parameters.
let mut query = let mut query =
"SELECT DISTINCT(e.content) FROM event e LEFT JOIN event_ref er ON e.id=er.event_id LEFT JOIN pubkey_ref pr ON e.id=pr.event_id " "SELECT DISTINCT(e.content) FROM event e LEFT JOIN tag t ON e.id=t.event_id ".to_owned();
.to_owned(); // parameters
let mut params: Vec<Box<dyn ToSql>> = vec![];
// for every filter in the subscription, generate a where clause // for every filter in the subscription, generate a where clause
let mut filter_clauses: Vec<String> = Vec::new(); let mut filter_clauses: Vec<String> = Vec::new();
for f in sub.filters.iter() { for f in sub.filters.iter() {
// individual filter components // individual filter components
let mut filter_components: Vec<String> = Vec::new(); let mut filter_components: Vec<String> = Vec::new();
// Query for "authors" // Query for "authors", allowing prefix matches
if f.authors.is_some() { if let Some(authvec) = &f.authors {
let authors_escaped: Vec<String> = f // take each author and convert to a hexsearch
.authors let mut auth_searches: Vec<String> = vec![];
.as_ref() for auth in authvec {
.unwrap() match hex_range(auth) {
.iter() Some(HexSearch::Exact(ex)) => {
.filter(|&x| is_hex(x)) auth_searches.push("author=?".to_owned());
.map(|x| format!("x'{}'", x)) params.push(Box::new(ex));
.collect(); }
let authors_clause = format!("author IN ({})", authors_escaped.join(", ")); Some(HexSearch::Range(lower, upper)) => {
auth_searches.push("(author>? AND author<?)".to_owned());
params.push(Box::new(lower));
params.push(Box::new(upper));
}
Some(HexSearch::LowerOnly(lower)) => {
auth_searches.push("author>?".to_owned());
params.push(Box::new(lower));
}
None => {
info!("Could not parse hex range from author {:?}", auth);
}
}
}
let authors_clause = format!("({})", auth_searches.join(" OR "));
filter_components.push(authors_clause); filter_components.push(authors_clause);
} }
// Query for Kind // Query for Kind
@@ -308,46 +406,60 @@ fn query_from_sub(sub: &Subscription) -> String {
let kind_clause = format!("kind IN ({})", str_kinds.join(", ")); let kind_clause = format!("kind IN ({})", str_kinds.join(", "));
filter_components.push(kind_clause); filter_components.push(kind_clause);
} }
// Query for event // Query for event, allowing prefix matches
if f.ids.is_some() { if let Some(idvec) = &f.ids {
let ids_escaped: Vec<String> = f // take each id and convert to a hexsearch
.ids let mut id_searches: Vec<String> = vec![];
.as_ref() for id in idvec {
.unwrap() match hex_range(id) {
.iter() Some(HexSearch::Exact(ex)) => {
.filter(|&x| is_hex(x)) id_searches.push("event_hash=?".to_owned());
.map(|x| format!("x'{}'", x)) params.push(Box::new(ex));
.collect(); }
let id_clause = format!("event_hash IN ({})", ids_escaped.join(", ")); Some(HexSearch::Range(lower, upper)) => {
id_searches.push("(event_hash>? AND event_hash<?)".to_owned());
params.push(Box::new(lower));
params.push(Box::new(upper));
}
Some(HexSearch::LowerOnly(lower)) => {
id_searches.push("event_hash>?".to_owned());
params.push(Box::new(lower));
}
None => {
info!("Could not parse hex range from id {:?}", id);
}
}
}
let id_clause = format!("({})", id_searches.join(" OR "));
filter_components.push(id_clause);
}
// Query for referenced event // Query for tags
if f.events.is_some() { if let Some(map) = &f.tags {
let events_escaped: Vec<String> = f for (key, val) in map.iter() {
.events let mut str_vals: Vec<Box<dyn ToSql>> = vec![];
.as_ref() let mut blob_vals: Vec<Box<dyn ToSql>> = vec![];
.unwrap() for v in val {
.iter() if is_hex(v) {
.filter(|&x| is_hex(x)) if let Ok(h) = hex::decode(&v) {
.map(|x| format!("x'{}'", x)) blob_vals.push(Box::new(h));
.collect(); }
let events_clause = format!("referenced_event IN ({})", events_escaped.join(", ")); } else {
filter_components.push(events_clause); str_vals.push(Box::new(v.to_owned()));
}
}
// create clauses with "?" params for each tag value being searched
let str_clause = format!("value IN ({})", repeat_vars(str_vals.len()));
let blob_clause = format!("value_hex IN ({})", repeat_vars(blob_vals.len()));
let tag_clause = format!("(name=? AND ({} OR {}))", str_clause, blob_clause);
// add the tag name as the first parameter
params.push(Box::new(key.to_owned()));
// add all tag values that are plain strings as params
params.append(&mut str_vals);
// add all tag values that are blobs as params
params.append(&mut blob_vals);
filter_components.push(tag_clause);
}
// Query for referenced pubkey
if f.pubkeys.is_some() {
let pubkeys_escaped: Vec<String> = f
.pubkeys
.as_ref()
.unwrap()
.iter()
.filter(|&x| is_hex(x))
.map(|x| format!("x'{}'", x))
.collect();
let pubkeys_clause = format!("referenced_pubkey IN ({})", pubkeys_escaped.join(", "));
filter_components.push(pubkeys_clause);
}
// Query for timestamp
if f.since.is_some() {
let created_clause = format!("created_at > {}", f.since.unwrap());
@@ -365,21 +477,22 @@ fn query_from_sub(sub: &Subscription) -> String {
fc.push_str(&filter_components.join(" AND "));
fc.push_str(" )");
filter_clauses.push(fc);
} else {
// never display hidden events
filter_clauses.push("hidden!=TRUE".to_owned());
}
}
// never display hidden events
query.push_str(" WHERE hidden!=TRUE ");
// combine all filters with OR clauses, if any exist
if !filter_clauses.is_empty() {
query.push_str(" WHERE "); query.push_str(" AND (");
query.push_str(&filter_clauses.join(" OR "));
query.push_str(") ");
}
// add order clause
query.push_str(" ORDER BY created_at ASC");
debug!("query string: {}", query);
query (query, params)
}
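To make the output shape concrete: a subscription with one filter holding a single exact (64-character) author and a since timestamp would produce roughly the SQL below, with the decoded author bytes as the only parameter. This is an approximation, since part of the clause assembly is elided in the hunks above:

SELECT DISTINCT(e.content) FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE hidden!=TRUE AND ( ( (author=?) AND created_at > 1644000000 ) ) ORDER BY created_at ASC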
/// Perform a database query using a subscription.
@@ -390,31 +503,38 @@ fn query_from_sub(sub: &Subscription) -> String {
/// query is immediately aborted.
pub async fn db_query(
sub: Subscription,
pool: SqlitePool,
query_tx: tokio::sync::mpsc::Sender<QueryResult>,
mut abandon_query_rx: tokio::sync::oneshot::Receiver<()>,
) {
task::spawn_blocking(move || {
let config = SETTINGS.read().unwrap();
let db_dir = &config.database.data_directory;
let full_path = Path::new(db_dir).join(DB_FILE);
let conn =
Connection::open_with_flags(&full_path, OpenFlags::SQLITE_OPEN_READ_ONLY).unwrap();
debug!("opened database for reading");
debug!("going to query for: {:?}", sub); debug!("going to query for: {:?}", sub);
let mut row_count: usize = 0;
let start = Instant::now();
// generate SQL query
let q = query_from_sub(&sub); let (q, p) = query_from_sub(&sub);
// execute the query debug!("SQL generated in {:?}", start.elapsed());
let mut stmt = conn.prepare(&q).unwrap(); // show pool stats
let mut event_rows = stmt.query([]).unwrap(); debug!("DB pool stats: {:?}", pool.state());
while let Some(row) = event_rows.next().unwrap() { let start = Instant::now();
// check if this is still active (we could do this every N rows) if let Ok(conn) = pool.get() {
// execute the query. Don't cache, since queries vary so much.
let mut stmt = conn.prepare(&q)?;
let mut event_rows = stmt.query(rusqlite::params_from_iter(p))?;
let mut first_result = true;
while let Some(row) = event_rows.next()? {
if first_result {
debug!("time to first result: {:?}", start.elapsed());
first_result = false;
}
// check if this is still active
// TODO: check every N rows
if abandon_query_rx.try_recv().is_ok() {
debug!("query aborted");
return; return Ok(());
}
// TODO: check before unwrapping row_count += 1;
let event_json = row.get(0).unwrap(); let event_json = row.get(0)?;
query_tx
.blocking_send(QueryResult {
sub_id: sub.get_id(),
@@ -422,6 +542,15 @@ pub async fn db_query(
})
.ok();
}
debug!("query completed"); debug!(
"query completed ({} rows) in {:?}",
row_count,
start.elapsed()
);
} else {
warn!("Could not get a database connection for querying");
}
let ok: Result<()> = Ok(());
ok
});
}
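A minimal sketch of how a caller wires these channels together (this mirrors the subscription handling in the server loop further down; it assumes a pool built with db::build_pool and that QueryResult's sub_id field is public like event):

use nostr_rs_relay::db::{self, QueryResult};
use nostr_rs_relay::subscription::Subscription;
use tokio::sync::{mpsc, oneshot};

// Sketch: run one subscription query and drain its results.
async fn run_one_query(sub: Subscription, pool: db::SqlitePool) {
    let (query_tx, mut query_rx) = mpsc::channel::<QueryResult>(256);
    let (abandon_tx, abandon_rx) = oneshot::channel::<()>();
    db::db_query(sub, pool, query_tx, abandon_rx).await;
    while let Some(qr) = query_rx.recv().await {
        println!("sub {}: {}", qr.sub_id, qr.event);
    }
    // sending on abandon_tx instead would cancel a long-running query
    drop(abandon_tx);
}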


@@ -40,6 +40,41 @@ pub enum Error {
ConfigError(config::ConfigError),
#[error("Data directory does not exist")]
DatabaseDirError,
#[error("Database Connection Pool Error")]
DatabasePoolError(r2d2::Error),
#[error("Custom Error : {0}")]
CustomError(String),
#[error("Task join error")]
JoinError,
#[error("Hyper Client error")]
HyperError(hyper::Error),
#[error("Unknown/Undocumented")]
UnknownError,
}
//impl From<Box<dyn std::error::Error>> for Error {
// fn from(e: Box<dyn std::error::Error>) -> Self {
// Error::CustomError("error".to_owned())
// }
//}
impl From<hyper::Error> for Error {
fn from(h: hyper::Error) -> Self {
Error::HyperError(h)
}
}
impl From<r2d2::Error> for Error {
fn from(d: r2d2::Error) -> Self {
Error::DatabasePoolError(d)
}
}
impl From<tokio::task::JoinError> for Error {
/// Wrap task join error
fn from(_j: tokio::task::JoinError) -> Self {
Error::JoinError
}
}
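These From impls are what let functions returning this crate's Result bubble errors up with the ? operator; a minimal sketch of the pattern (the helper function is hypothetical, and an r2d2-based pool type is assumed):

use nostr_rs_relay::error::Result;

// Hypothetical helper: `?` converts r2d2::Error into Error::DatabasePoolError.
fn checkout_conn(pool: &r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>) -> Result<()> {
    let _conn = pool.get()?;
    Ok(())
}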
impl From<rusqlite::Error> for Error {


@@ -2,6 +2,8 @@
use crate::config;
use crate::error::Error::*;
use crate::error::Result;
use crate::nip05;
use crate::utils::unix_time;
use bitcoin_hashes::{sha256, Hash};
use lazy_static::lazy_static;
use log::*;
@@ -9,21 +11,23 @@ use secp256k1::{schnorr, Secp256k1, VerifyOnly, XOnlyPublicKey};
use serde::{Deserialize, Deserializer, Serialize};
use serde_json::value::Value;
use serde_json::Number;
use std::collections::HashMap;
use std::collections::HashSet;
use std::str::FromStr;
use std::time::SystemTime;
lazy_static! {
/// Secp256k1 verification instance.
pub static ref SECP: Secp256k1<VerifyOnly> = Secp256k1::verification_only();
}
/// Event command in network format /// Event command in network format.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct EventCmd {
cmd: String, // expecting static "EVENT"
event: Event,
}
/// Event parsed /// Parsed nostr event.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct Event {
pub id: String,
@@ -35,6 +39,9 @@ pub struct Event {
pub(crate) tags: Vec<Vec<String>>,
pub(crate) content: String,
pub(crate) sig: String,
// Optimization for tag search, built on demand
#[serde(skip)]
pub(crate) tagidx: Option<HashMap<String, HashSet<String>>>,
}
/// Simple tag type for array of array of strings.
@@ -56,26 +63,66 @@ impl From<EventCmd> for Result<Event> {
if ec.cmd != "EVENT" {
Err(CommandUnknownError)
} else if ec.event.is_valid() {
Ok(ec.event) let mut e = ec.event;
e.build_index();
Ok(e)
} else {
Err(EventInvalid)
}
}
}
/// Seconds since 1970
fn unix_time() -> u64 {
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.map(|x| x.as_secs())
.unwrap_or(0)
}
impl Event {
pub fn is_kind_metadata(&self) -> bool {
self.kind == 0
}
/// Pull a NIP-05 Name out of the event, if one exists
pub fn get_nip05_addr(&self) -> Option<nip05::Nip05Name> {
if self.is_kind_metadata() {
// very quick check if we should attempt to parse this json
if self.content.contains("\"nip05\"") {
// Parse into JSON
let md_parsed: Value = serde_json::from_str(&self.content).ok()?;
let md_map = md_parsed.as_object()?;
let nip05_str = md_map.get("nip05")?.as_str()?;
return nip05::Nip05Name::try_from(nip05_str).ok();
}
}
None
}
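For example (illustrative values), a kind-0 event whose content parses as the JSON below would return a Nip05Name for bob@example.com:

{"name": "bob", "nip05": "bob@example.com"}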
/// Build an event tag index
fn build_index(&mut self) {
// if there are no tags; just leave the index as None
if self.tags.is_empty() {
return;
}
// otherwise, build an index
let mut idx: HashMap<String, HashSet<String>> = HashMap::new();
// iterate over tags that have at least 2 elements
for t in self.tags.iter().filter(|x| x.len() > 1) {
let tagname = t.get(0).unwrap();
let tagval = t.get(1).unwrap();
// ensure a vector exists for this tag
if !idx.contains_key(tagname) {
idx.insert(tagname.clone(), HashSet::new());
}
// get the tag vec and insert entry
let tidx = idx.get_mut(tagname).expect("could not get tag vector");
tidx.insert(tagval.clone());
}
// save the tag structure
self.tagidx = Some(idx);
}
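To make the index shape concrete, a standalone sketch of the same construction (tag values are examples; the entry API is an equivalent shorthand for the contains_key/insert sequence above). An event tagged [["e","abc"],["p","def"],["e","123"]] ends up with idx = {"e": {"abc","123"}, "p": {"def"}}:

use std::collections::{HashMap, HashSet};

fn main() {
    let tags = vec![
        vec!["e".to_owned(), "abc".to_owned()],
        vec!["p".to_owned(), "def".to_owned()],
        vec!["e".to_owned(), "123".to_owned()],
    ];
    let mut idx: HashMap<String, HashSet<String>> = HashMap::new();
    // only tags with at least 2 elements are indexed, as above
    for t in tags.iter().filter(|x| x.len() > 1) {
        idx.entry(t[0].clone()).or_default().insert(t[1].clone());
    }
    assert!(idx["e"].contains("123"));
}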
/// Create a short event identifier, suitable for logging.
pub fn get_event_id_prefix(&self) -> String {
self.id.chars().take(8).collect()
}
pub fn get_author_prefix(&self) -> String {
self.pubkey.chars().take(8).collect()
}
/// Check if this event has a valid signature.
fn is_valid(&self) -> bool {
@@ -101,26 +148,35 @@ impl Event {
// ** [0, pubkey-hex-string, created-at-num, kind-num, tags-array-of-arrays, content-string]
// * serialize with no spaces/newlines
let c_opt = self.to_canonical();
debug!("Canonical: {:?}", &c_opt);
debug!("Canonical: {}", c_opt.as_ref().unwrap());
if c_opt.is_none() {
info!("event could not be canonicalized"); debug!("event could not be canonicalized");
return false;
}
let c = c_opt.unwrap();
// * compute the sha256sum.
let digest: sha256::Hash = sha256::Hash::hash(c.as_bytes());
let hex_digest = format!("{:x}", digest);
debug!("hex is: {}", hex_digest);
// * ensure the id matches the computed sha256sum.
if self.id != hex_digest {
debug!("event id does not match digest");
return false;
}
// * validate the message digest (sig) using the pubkey & computed sha256 message hash.
let sig = schnorr::Signature::from_str(&self.sig).unwrap();
if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) {
let pubkey = XOnlyPublicKey::from_str(&self.pubkey).unwrap(); if let Ok(pubkey) = XOnlyPublicKey::from_str(&self.pubkey) {
let verify = SECP.verify_schnorr(&sig, &msg, &pubkey);
matches!(verify, Ok(()))
} else {
warn!("Error converting digest to secp256k1 message"); debug!("Client sent malformed pubkey");
false
}
} else {
info!("Error converting digest to secp256k1 message");
false
}
}
@@ -162,36 +218,18 @@ impl Event {
serde_json::Value::Array(tags)
}
/// Get a list of event tags. /// Determine if the given tag and value set intersect with tags in this event.
pub fn get_event_tags(&self) -> Vec<&str> { pub fn generic_tag_val_intersect(&self, tagname: &str, check: &HashSet<String>) -> bool {
let mut etags = vec![]; match &self.tagidx {
for t in self.tags.iter() { Some(idx) => match idx.get(tagname) {
if t.len() >= 2 && t.get(0).unwrap() == "e" { Some(valset) => {
etags.push(&t.get(1).unwrap()[..]); let common = valset.intersection(check);
common.count() > 0
}
None => false,
},
None => false,
}
etags
}
/// Get a list of pubkey/petname tags.
pub fn get_pubkey_tags(&self) -> Vec<&str> {
let mut ptags = vec![];
for t in self.tags.iter() {
if t.len() >= 2 && t.get(0).unwrap() == "p" {
ptags.push(&t.get(1).unwrap()[..]);
}
}
ptags
}
/// Check if a given event is referenced in an event tag.
pub fn event_tag_match(&self, eventid: &str) -> bool {
self.get_event_tags().contains(&eventid)
}
/// Check if a given event is referenced in an event tag.
pub fn pubkey_tag_match(&self, pubkey: &str) -> bool {
self.get_pubkey_tags().contains(&pubkey)
}
}
@@ -207,6 +245,7 @@ mod tests {
tags: vec![],
content: "".to_owned(),
sig: "0".to_owned(),
tagidx: None,
}
}
@@ -229,7 +268,8 @@ mod tests {
#[test]
fn empty_event_tag_match() -> Result<()> {
let event = simple_event();
assert!(!event.event_tag_match("foo")); assert!(!event
.generic_tag_val_intersect("e", &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
Ok(())
}
@@ -237,7 +277,14 @@ mod tests {
fn single_event_tag_match() -> Result<()> {
let mut event = simple_event();
event.tags = vec![vec!["e".to_owned(), "foo".to_owned()]];
assert!(event.event_tag_match("foo")); event.build_index();
assert_eq!(
event.generic_tag_val_intersect(
"e",
&HashSet::from(["foo".to_owned(), "bar".to_owned()])
),
true
);
Ok(())
}
@@ -281,6 +328,7 @@ mod tests {
tags: vec![],
content: "this is a test".to_owned(),
sig: "abcde".to_owned(),
tagidx: None,
};
let c = e.to_canonical();
let expected = Some(r#"[0,"012345",501234,1,[],"this is a test"]"#.to_owned());
@@ -304,6 +352,7 @@ mod tests {
],
content: "this is a test".to_owned(),
sig: "abcde".to_owned(),
tagidx: None,
};
let c = e.to_canonical();
let expected_json = r###"[0,"012345",501234,1,[["#e","aoeu"],["#p","aaaa","ws://example.com"]],"this is a test"]"###;

src/hexrange.rs Normal file

@@ -0,0 +1,159 @@
//! Utilities for searching hexadecimal
use crate::utils::is_hex;
use hex;
/// Types of hexadecimal queries.
#[derive(PartialEq, Debug, Clone)]
pub enum HexSearch {
// when no range is needed, exact 32-byte
Exact(Vec<u8>),
// lower (inclusive) and upper range (exclusive)
Range(Vec<u8>, Vec<u8>),
// lower bound only, upper bound is MAX inclusive
LowerOnly(Vec<u8>),
}
/// Check if a string contains only f chars
fn is_all_fs(s: &str) -> bool {
s.chars().all(|x| x == 'f' || x == 'F')
}
/// Find the next hex sequence greater than the argument.
pub fn hex_range(s: &str) -> Option<HexSearch> {
// handle special cases
if !is_hex(s) || s.len() > 64 {
return None;
}
if s.len() == 64 {
return Some(HexSearch::Exact(hex::decode(s).ok()?));
}
// if s is odd, add a zero
let mut hash_base = s.to_owned();
let mut odd = hash_base.len() % 2 != 0;
if odd {
// extend the string to make it even
hash_base.push('0');
}
let base = hex::decode(hash_base).ok()?;
// check for all ff's
if is_all_fs(s) {
// there is no higher bound, we only want to search for blobs greater than this.
return Some(HexSearch::LowerOnly(base));
}
// return a range
let mut upper = base.clone();
let mut byte_len = upper.len();
// for odd strings, we made them longer, but we want to increment the upper char (+16).
// we know we can do this without overflowing because we explicitly set the bottom half to 0's.
while byte_len > 0 {
byte_len -= 1;
// check if byte can be incremented, or if we need to carry.
let b = upper[byte_len];
if b == u8::MAX {
// reset and carry
upper[byte_len] = 0;
} else if odd {
// check if first char in this byte is NOT 'f'
if b < 240 {
upper[byte_len] = b + 16; // bump up the first character in this byte
// increment done, stop iterating through the vec
break;
} else {
// if it is 'f', reset the byte to 0 and do a carry
// reset and carry
upper[byte_len] = 0;
}
// done with odd logic, so don't repeat this
odd = false;
} else {
// bump up the first character in this byte
upper[byte_len] = b + 1;
// increment done, stop iterating
break;
}
}
Some(HexSearch::Range(base, upper))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::error::Result;
#[test]
fn hex_range_exact() -> Result<()> {
let hex = "abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00";
let r = hex_range(hex);
assert_eq!(
r,
Some(HexSearch::Exact(hex::decode(hex).expect("invalid hex")))
);
Ok(())
}
#[test]
fn hex_full_range() -> Result<()> {
let hex = "aaaa";
let hex_upper = "aaab";
let r = hex_range(hex);
assert_eq!(
r,
Some(HexSearch::Range(
hex::decode(hex).expect("invalid hex"),
hex::decode(hex_upper).expect("invalid hex")
))
);
Ok(())
}
#[test]
fn hex_full_range_odd() -> Result<()> {
let r = hex_range("abc");
assert_eq!(
r,
Some(HexSearch::Range(
hex::decode("abc0").expect("invalid hex"),
hex::decode("abd0").expect("invalid hex")
))
);
Ok(())
}
#[test]
fn hex_full_range_odd_end_f() -> Result<()> {
let r = hex_range("abf");
assert_eq!(
r,
Some(HexSearch::Range(
hex::decode("abf0").expect("invalid hex"),
hex::decode("ac00").expect("invalid hex")
))
);
Ok(())
}
#[test]
fn hex_no_upper() -> Result<()> {
let r = hex_range("ffff");
assert_eq!(
r,
Some(HexSearch::LowerOnly(
hex::decode("ffff").expect("invalid hex")
))
);
Ok(())
}
#[test]
fn hex_no_upper_odd() -> Result<()> {
let r = hex_range("fff");
assert_eq!(
r,
Some(HexSearch::LowerOnly(
hex::decode("fff0").expect("invalid hex")
))
);
Ok(())
}
}

src/info.rs Normal file

@@ -0,0 +1,43 @@
//! Relay metadata using NIP-11
/// Relay Info
use crate::config;
use serde::{Deserialize, Serialize};
pub const CARGO_PKG_VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
#[derive(Debug, Serialize, Deserialize)]
#[allow(unused)]
pub struct RelayInfo {
#[serde(skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub pubkey: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub contact: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub supported_nips: Option<Vec<i64>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub software: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
}
/// Convert an Info configuration into public Relay Info
impl From<config::Info> for RelayInfo {
fn from(i: config::Info) -> Self {
RelayInfo {
id: i.relay_url,
name: i.name,
description: i.description,
pubkey: i.pubkey,
contact: i.contact,
supported_nips: Some(vec![1, 2, 11]),
software: Some("https://git.sr.ht/~gheartsfield/nostr-rs-relay".to_owned()),
version: CARGO_PKG_VERSION.map(|x| x.to_owned()),
}
}
}
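With example config values, the serialized NIP-11 document would look roughly like this (unset optional fields are omitted via skip_serializing_if):

{
  "name": "example-relay",
  "description": "a relay for testing",
  "supported_nips": [1, 2, 11],
  "software": "https://git.sr.ht/~gheartsfield/nostr-rs-relay",
  "version": "0.5.2"
}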


@@ -4,5 +4,9 @@ pub mod conn;
pub mod db;
pub mod error;
pub mod event;
pub mod protostream; pub mod hexrange;
pub mod info;
pub mod nip05;
pub mod schema;
pub mod subscription;
pub mod utils;


@@ -1,6 +1,7 @@
//! Server process
use futures::SinkExt;
use futures::StreamExt;
use hyper::header::ACCEPT;
use hyper::service::{make_service_fn, service_fn};
use hyper::upgrade::Upgraded;
use hyper::{
@@ -8,46 +9,57 @@ use hyper::{
};
use log::*;
use nostr_rs_relay::close::Close;
use nostr_rs_relay::close::CloseCmd;
use nostr_rs_relay::config;
use nostr_rs_relay::conn;
use nostr_rs_relay::db;
use nostr_rs_relay::db::SubmittedEvent;
use nostr_rs_relay::error::{Error, Result};
use nostr_rs_relay::event::Event;
use nostr_rs_relay::protostream; use nostr_rs_relay::event::EventCmd;
use nostr_rs_relay::protostream::NostrMessage::*; use nostr_rs_relay::info::RelayInfo;
use nostr_rs_relay::protostream::NostrResponse::*; use nostr_rs_relay::nip05;
use nostr_rs_relay::subscription::Subscription;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::convert::Infallible;
use std::env;
use std::net::SocketAddr;
use std::path::Path;
use std::time::Duration;
use std::time::Instant;
use tokio::runtime::Builder;
use tokio::sync::broadcast; use tokio::sync::broadcast::{self, Receiver, Sender};
use tokio::sync::broadcast::{Receiver, Sender};
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio_tungstenite::WebSocketStream;
use tungstenite::error::Error as WsError;
use tungstenite::handshake;
use tungstenite::protocol::Message;
use tungstenite::protocol::WebSocketConfig;
/// Return a requested DB name from command line arguments.
fn db_from_args(args: Vec<String>) -> Option<String> {
if args.len() == 3 && args.get(1) == Some(&"--db".to_owned()) {
return args.get(2).map(|x| x.to_owned());
}
None
}
/// Handle arbitrary HTTP requests, including for WebSocket upgrades.
async fn handle_web_request(
mut request: Request<Body>,
pool: db::SqlitePool,
remote_addr: SocketAddr,
broadcast: Sender<Event>,
event_tx: tokio::sync::mpsc::Sender<Event>, event_tx: tokio::sync::mpsc::Sender<SubmittedEvent>,
shutdown: Receiver<()>,
) -> Result<Response<Body>, Infallible> {
match (
request.uri().path(),
request.headers().contains_key(header::UPGRADE),
) {
//if the request is ws_echo and the request headers contains an Upgrade key // Request for / as websocket
("/", true) => { ("/", true) => {
debug!("websocket with upgrade request"); debug!("websocket with upgrade request");
//assume request is a handshake, so create the handshake response //assume request is a handshake, so create the handshake response
@@ -62,17 +74,25 @@ async fn handle_web_request(
match upgrade::on(&mut request).await {
//if successfully upgraded
Ok(upgraded) => {
// set WebSocket configuration options
let mut config = WebSocketConfig::default();
{
let settings = config::SETTINGS.read().unwrap();
config.max_message_size = settings.limits.max_ws_message_bytes;
config.max_frame_size = settings.limits.max_ws_frame_bytes;
}
//create a websocket stream from the upgraded object
let ws_stream = WebSocketStream::from_raw_socket(
//pass the upgraded object
//as the base layer stream of the Websocket
upgraded,
tokio_tungstenite::tungstenite::protocol::Role::Server,
None, Some(config),
)
.await;
tokio::spawn(nostr_server(
ws_stream, broadcast, event_tx, shutdown, pool, ws_stream, broadcast, event_tx, shutdown,
));
}
Err(e) => println!(
@@ -96,10 +116,30 @@ async fn handle_web_request(
};
Ok::<_, Infallible>(response)
}
// Request for Relay info
("/", false) => { ("/", false) => {
// handle request at root with no upgrade header
// Check if this is a nostr server info request
let accept_header = &request.headers().get(ACCEPT);
// check if application/nostr+json is included
if let Some(media_types) = accept_header {
if let Ok(mt_str) = media_types.to_str() {
if mt_str.contains("application/nostr+json") {
let config = config::SETTINGS.read().unwrap();
// build a relay info response
debug!("Responding to server info request");
let rinfo = RelayInfo::from(config.info.clone());
let b = Body::from(serde_json::to_string_pretty(&rinfo).unwrap());
return Ok(Response::builder()
.status(200)
.header("Content-Type", "application/nostr+json")
.body(b)
.unwrap());
}
}
}
Ok(Response::new(Body::from(
"This is a Nostr relay.\n".to_string(), "Please use a Nostr client to connect.",
)))
}
(_, _) => {
@@ -137,15 +177,45 @@ fn main() -> Result<(), Error> {
*settings = c;
}
let config = config::SETTINGS.read().unwrap(); let settings = config::SETTINGS.read().unwrap();
trace!("Config: {:?}", settings);
// do some config validation.
if !Path::new(&config.database.data_directory).is_dir() { if !Path::new(&settings.database.data_directory).is_dir() {
error!("Database directory does not exist"); error!("Database directory does not exist");
return Err(Error::DatabaseDirError); return Err(Error::DatabaseDirError);
} }
debug!("config: {:?}", config); let addr = format!(
let addr = format!("{}:{}", config.network.address.trim(), config.network.port); "{}:{}",
settings.network.address.trim(),
settings.network.port
);
let socket_addr = addr.parse().expect("listening address not valid");
// address whitelisting settings
if let Some(addr_whitelist) = &settings.authorization.pubkey_whitelist {
info!(
"Event publishing restricted to {} pubkey(s)",
addr_whitelist.len()
);
}
// check if NIP-05 enforced user verification is on
if settings.verified_users.is_active() {
info!(
"NIP-05 user verification mode:{:?}",
settings.verified_users.mode
);
if let Some(d) = settings.verified_users.verify_update_duration() {
info!("NIP-05 check user verification every: {:?}", d);
}
if let Some(d) = settings.verified_users.verify_expiration_duration() {
info!("NIP-05 user verification expires after: {:?}", d);
}
if let Some(wl) = &settings.verified_users.domain_whitelist {
info!("NIP-05 domain whitelist: {:?}", wl);
}
if let Some(bl) = &settings.verified_users.domain_blacklist {
info!("NIP-05 domain blacklist: {:?}", bl);
}
}
// configure tokio runtime
let rt = Builder::new_multi_thread()
.enable_all()
@@ -163,25 +233,62 @@ fn main() -> Result<(), Error> {
let (bcast_tx, _) = broadcast::channel::<Event>(settings.limits.broadcast_buffer);
// validated events that need to be persisted are sent to the
// database via this channel.
let (event_tx, event_rx) = mpsc::channel::<Event>(settings.limits.event_persist_buffer); let (event_tx, event_rx) =
mpsc::channel::<SubmittedEvent>(settings.limits.event_persist_buffer);
// establish a channel for letting all threads know about a
// requested server shutdown.
let (invoke_shutdown, _) = broadcast::channel::<()>(1); let (invoke_shutdown, shutdown_listen) = broadcast::channel::<()>(1);
let ctrl_c_shutdown = invoke_shutdown.clone(); // create a channel for sending any new metadata event. These
// will get processed relatively slowly (a potentially
// multi-second blocking HTTP call) on a single thread, so we
// buffer requests on the channel. No harm in dropping events
// here, since we are protecting against DoS. This can make
// it difficult to setup initial metadata in bulk, since
// overwhelming this will drop events and won't register
// metadata events.
let (metadata_tx, metadata_rx) = broadcast::channel::<Event>(4096);
// start the database writer thread. Give it a channel for
// writing events, and for publishing events that have been
// written (to all connected clients).
db::db_writer(
event_rx,
bcast_tx.clone(),
metadata_tx.clone(),
shutdown_listen,
)
.await;
info!("db writer created");
// create a nip-05 verifier thread
let verifier_opt = nip05::Verifier::new(metadata_rx, bcast_tx.clone());
if let Ok(mut v) = verifier_opt {
if settings.verified_users.is_active() {
tokio::task::spawn(async move {
info!("starting up NIP-05 verifier...");
v.run().await;
});
}
}
// listen for ctrl-c interrupts
let ctrl_c_shutdown = invoke_shutdown.clone();
tokio::spawn(async move {
tokio::signal::ctrl_c().await.unwrap();
info!("shutting down due to SIGINT");
ctrl_c_shutdown.send(()).ok();
});
// start the database writer thread. Give it a channel for // build a connection pool for sqlite connections
// writing events, and for publishing events that have been let pool = db::build_pool(
// written (to all connected clients). "client query",
db::db_writer(event_rx, bcast_tx.clone(), invoke_shutdown.subscribe()).await; rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY
info!("db writer created"); | rusqlite::OpenFlags::SQLITE_OPEN_SHARED_CACHE,
settings.database.min_conn,
settings.database.max_conn,
true,
);
// A `Service` is needed for every connection, so this
// creates one from our `handle_request` function.
let make_svc = make_service_fn(|conn: &AddrStream| {
let svc_pool = pool.clone();
let remote_addr = conn.remote_addr();
let bcast = bcast_tx.clone();
let event = event_tx.clone();
@@ -191,6 +298,7 @@ fn main() -> Result<(), Error> {
Ok::<_, Infallible>(service_fn(move |request: Request<Body>| {
handle_web_request(
request,
svc_pool.clone(),
remote_addr,
bcast.clone(),
event.clone(),
@@ -211,55 +319,113 @@ fn main() -> Result<(), Error> {
Ok(())
}
/// Nostr protocol messages from a client
#[derive(Deserialize, Serialize, Clone, PartialEq, Debug)]
#[serde(untagged)]
pub enum NostrMessage {
/// An `EVENT` message
EventMsg(EventCmd),
/// A `REQ` message
SubMsg(Subscription),
/// A `CLOSE` message
CloseMsg(CloseCmd),
}
/// Convert Message to NostrMessage
fn convert_to_msg(msg: String) -> Result<NostrMessage> {
let config = config::SETTINGS.read().unwrap();
let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
match parsed_res {
Ok(m) => {
if let NostrMessage::EventMsg(_) = m {
if let Some(max_size) = config.limits.max_event_bytes {
// check length, ensure that some max size is set.
if msg.len() > max_size && max_size > 0 {
return Err(Error::EventMaxLengthError(msg.len()));
}
}
}
Ok(m)
}
Err(e) => {
debug!("proto parse error: {:?}", e);
debug!("parse error on message: {}", msg.trim());
Err(Error::ProtoParseError)
}
}
}
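For reference, serde tries the untagged variants in declaration order, so the three standard client messages map as follows (angle brackets are placeholders):

["EVENT", <event object>]             => NostrMessage::EventMsg
["REQ", "<sub-id>", <filter object>]  => NostrMessage::SubMsg
["CLOSE", "<sub-id>"]                 => NostrMessage::CloseMsg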
/// Handle new client connections. This runs through an event loop
/// for all client communication.
async fn nostr_server(
ws_stream: WebSocketStream<Upgraded>, pool: db::SqlitePool,
mut ws_stream: WebSocketStream<Upgraded>,
broadcast: Sender<Event>,
event_tx: tokio::sync::mpsc::Sender<Event>, event_tx: mpsc::Sender<SubmittedEvent>,
mut shutdown: Receiver<()>,
) {
// get a broadcast channel for clients to communicate on
let mut bcast_rx = broadcast.subscribe();
let mut config = WebSocketConfig::default();
{
let settings = config::SETTINGS.read().unwrap();
config.max_message_size = settings.limits.max_ws_message_bytes;
config.max_frame_size = settings.limits.max_ws_frame_bytes;
}
// upgrade the TCP connection to WebSocket
//let conn = tokio_tungstenite::accept_async_with_config(stream, Some(config)).await;
//let ws_stream = conn.expect("websocket handshake error");
// wrap websocket into a stream & sink of Nostr protocol messages
let mut nostr_stream = protostream::wrap_ws_in_nostr(ws_stream);
// Track internal client state
let mut conn = conn::ClientConn::new();
let cid = conn.get_client_prefix();
// Create a channel for receiving query results from the database.
// we will send out the tx handle to any query we generate.
let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(256);
// Create channel for receiving NOTICEs
let (notice_tx, mut notice_rx) = mpsc::channel::<String>(32);
// last time this client sent data (message, ping, etc.)
let mut last_message_time = Instant::now();
// ping interval (every 5 minutes)
let default_ping_dur = Duration::from_secs(300);
// disconnect after 20 minutes without a ping response or event.
let max_quiet_time = Duration::from_secs(60 * 20);
let start = tokio::time::Instant::now() + default_ping_dur;
let mut ping_interval = tokio::time::interval_at(start, default_ping_dur);
// maintain a hashmap of a oneshot channel for active subscriptions.
// when these subscriptions are cancelled, make a message
// available to the executing query so it knows to stop.
//let (abandon_query_tx, _) = oneshot::channel::<()>();
let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
// for stats, keep track of how many events the client published,
// and how many it received from queries.
let mut client_published_event_count: usize = 0;
let mut client_received_event_count: usize = 0;
info!("new connection for client: {}", cid); info!("new connection for client: {:?}", cid);
loop {
tokio::select! {
_ = shutdown.recv() => {
// server shutting down, exit loop
break;
},
_ = ping_interval.tick() => {
// check how long since we talked to client
// if it has been too long, disconnect
if last_message_time.elapsed() > max_quiet_time {
debug!("ending connection due to lack of client ping response");
break;
}
// Send a ping
ws_stream.send(Message::Ping(Vec::new())).await.ok();
},
Some(notice_msg) = notice_rx.recv() => {
let n = notice_msg.to_string().replace("\"", "");
ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", n))).await.ok();
},
Some(query_result) = query_rx.recv() => {
// database informed us of a query result we asked for
let res = EventRes(query_result.sub_id,query_result.event);
client_received_event_count += 1;
nostr_stream.send(res).await.ok(); // send a result
let subesc = query_result.sub_id.replace("\"", "");
let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
ws_stream.send(Message::Text(send_str)).await.ok();
},
// TODO: consider logging the LaggedRecv error
Ok(global_event) = bcast_rx.recv() => {
// an event has been broadcast to all clients
// first check if there is a subscription for this event.
@@ -268,39 +434,78 @@ async fn nostr_server(
// TODO: serialize at broadcast time, instead of
// once for each consumer.
if let Ok(event_str) = serde_json::to_string(&global_event) {
debug!("sub match: client: {}, sub: {}, event: {}", debug!("sub match: client: {:?}, sub: {:?}, event: {:?}",
cid, s,
global_event.get_event_id_prefix());
// create an event response and send it
let res = EventRes(s.to_owned(),event_str); let subesc = s.replace("\"", "");
nostr_stream.send(res).await.ok(); ws_stream.send(Message::Text(format!("[\"EVENT\",\"{}\",{}]", subesc, event_str))).await.ok();
//nostr_stream.send(res).await.ok();
} else {
warn!("could not convert event to string"); warn!("could not serialize event {:?}", global_event.get_event_id_prefix());
}
}
},
// check if this client has a subscription ws_next = ws_stream.next() => {
proto_next = nostr_stream.next() => { // update most recent message time for client
match proto_next { last_message_time = Instant::now();
Some(Ok(EventMsg(ec))) => { // Consume text messages from the client, parse into Nostr messages.
let nostr_msg = match ws_next {
Some(Ok(Message::Text(m))) => {
convert_to_msg(m)
},
Some(Ok(Message::Binary(_))) => {
ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "binary messages are not accepted"))).await.ok();
continue;
},
Some(Ok(Message::Ping(_))) | Some(Ok(Message::Pong(_))) => {
// get a ping/pong, ignore. tungstenite will
// send responses automatically.
continue;
},
None |
Some(Ok(Message::Close(_))) |
Some(Err(WsError::AlreadyClosed)) |
Some(Err(WsError::ConnectionClosed)) |
Some(Err(WsError::Protocol(tungstenite::error::ProtocolError::ResetWithoutClosingHandshake)))
=> {
debug!("websocket close from client: {:?}",cid);
break;
},
Some(Err(WsError::Io(e))) => {
// IO errors are considered fatal
warn!("IO error (client: {:?}): {:?}", cid, e);
break;
}
x => {
// default condition on error is to close the client connection
info!("unknown error (client: {:?}): {:?} (closing conn)", cid, x);
break;
}
};
// convert ws_next into proto_next
match nostr_msg {
Ok(NostrMessage::EventMsg(ec)) => {
// An EventCmd needs to be validated to be converted into an Event
// handle each type of message
let parsed : Result<Event> = Result::<Event>::from(ec);
match parsed {
Ok(e) => {
let id_prefix:String = e.id.chars().take(8).collect();
debug!("successfully parsed/validated event: {} from client: {}", id_prefix, cid); debug!("successfully parsed/validated event: {:?} from client: {:?}", id_prefix, cid);
// Write this to the database // Write this to the database.
event_tx.send(e.clone()).await.ok(); let submit_event = SubmittedEvent { event: e.clone(), notice_tx: notice_tx.clone() };
event_tx.send(submit_event).await.ok();
client_published_event_count += 1;
},
Err(_) => {
info!("client {} sent an invalid event", cid); info!("client {:?} sent an invalid event", cid);
nostr_stream.send(NoticeRes("event was invalid".to_owned())).await.ok(); ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "event was invalid"))).await.ok();
}
}
},
Some(Ok(SubMsg(s))) => { Ok(NostrMessage::SubMsg(s)) => {
debug!("client {} requesting a subscription", cid); debug!("client {} requesting a subscription", cid);
// subscription handling consists of: // subscription handling consists of:
// * registering the subscription so future events can be matched // * registering the subscription so future events can be matched
@@ -309,18 +514,21 @@ async fn nostr_server(
let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
match conn.subscribe(s.clone()) {
Ok(()) => {
running_queries.insert(s.id.to_owned(), abandon_query_tx); // when we insert, if there was a previous query running with the same name, cancel it.
if let Some(previous_query) = running_queries.insert(s.id.to_owned(), abandon_query_tx) {
previous_query.send(()).ok();
}
// start a database query
db::db_query(s, query_tx.clone(), abandon_query_rx).await; db::db_query(s, pool.clone(), query_tx.clone(), abandon_query_rx).await;
},
Err(e) => {
info!("Subscription error: {}", e);
nostr_stream.send(NoticeRes(format!("{}",e))).await.ok(); let s = e.to_string().replace("\"", "");
ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", s))).await.ok();
}
}
},
Some(Ok(CloseMsg(cc))) => { Ok(NostrMessage::CloseMsg(cc)) => {
// closing a request simply removes the subscription.
let parsed : Result<Close> = Result::<Close>::from(cc);
match parsed {
@@ -337,24 +545,24 @@ async fn nostr_server(
},
Err(_) => {
info!("invalid command ignored");
ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "could not parse command"))).await.ok();
}
}
},
None => { Err(Error::ConnError) => {
debug!("normal websocket close from client: {}",cid); debug!("got connection close/error, disconnecting client: {:?}",cid);
break;
},
Some(Err(Error::ConnError)) => {
debug!("got connection close/error, disconnecting client: {}",cid);
break;
}
Some(Err(Error::EventMaxLengthError(s))) => { Err(Error::EventMaxLengthError(s)) => {
info!("client {} sent event larger ({} bytes) than max size", cid, s); info!("client {:?} sent event larger ({} bytes) than max size", cid, s);
nostr_stream.send(NoticeRes("event exceeded max size".to_owned())).await.ok(); ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "event exceeded max size"))).await.ok();
},
Some(Err(e)) => { Err(Error::ProtoParseError) => {
info!("got non-fatal error from client: {}, error: {:?}", cid, e); info!("client {:?} sent event that could not be parsed", cid);
ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "could not parse command"))).await.ok();
},
Err(e) => {
info!("got non-fatal error from client: {:?}, error: {:?}", cid, e);
},
}
},
@@ -365,7 +573,7 @@ async fn nostr_server(
stop_tx.send(()).ok();
}
info!(
"stopping connection for client: {} (client sent {} event(s), received {})", "stopping connection for client: {:?} (client sent {} event(s), received {})",
cid, client_published_event_count, client_received_event_count
);
}

src/nip05.rs Normal file

@@ -0,0 +1,818 @@
//! User verification using NIP-05 names
//!
//! NIP-05 defines a mechanism for authors to associate an internet
//! address with their public key, in metadata events. This module
//! consumes a stream of metadata events, and keeps a database table
//! updated with the current NIP-05 verification status.
use crate::config::SETTINGS;
use crate::db;
use crate::error::{Error, Result};
use crate::event::Event;
use crate::utils::unix_time;
use hyper::body::HttpBody;
use hyper::client::connect::HttpConnector;
use hyper::Client;
use hyper_tls::HttpsConnector;
use log::*;
use rand::Rng;
use rusqlite::params;
use std::time::Duration;
use std::time::Instant;
use std::time::SystemTime;
use tokio::time::Interval;
/// NIP-05 verifier state
pub struct Verifier {
/// Metadata events for us to inspect
metadata_rx: tokio::sync::broadcast::Receiver<Event>,
/// Newly validated events get written and then broadcast on this channel to subscribers
event_tx: tokio::sync::broadcast::Sender<Event>,
/// SQLite read query pool
read_pool: db::SqlitePool,
/// SQLite write query pool
write_pool: db::SqlitePool,
/// HTTP client
client: hyper::Client<HttpsConnector<HttpConnector>, hyper::Body>,
/// After all accounts are updated, wait this long before checking again.
wait_after_finish: Duration,
/// Minimum amount of time between HTTP queries
http_wait_duration: Duration,
/// Interval for updating verification records
reverify_interval: Interval,
}
/// A NIP-05 identifier is a local part and domain.
#[derive(PartialEq, Debug, Clone)]
pub struct Nip05Name {
local: String,
domain: String,
}
impl Nip05Name {
/// Does this name represent the entire domain?
pub fn is_domain_only(&self) -> bool {
self.local == "_"
}
/// Determine the URL to query for verification
fn to_url(&self) -> Option<http::Uri> {
format!(
"https://{}/.well-known/nostr.json?name={}",
self.domain, self.local
)
.parse::<http::Uri>()
.ok()
}
}
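For example, the name bob@example.com produces the verification URL:

https://example.com/.well-known/nostr.json?name=bob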
// Parsing Nip05Names from strings
impl std::convert::TryFrom<&str> for Nip05Name {
type Error = Error;
fn try_from(inet: &str) -> Result<Self, Self::Error> {
// break full name at the @ boundary.
let components: Vec<&str> = inet.split('@').collect();
if components.len() != 2 {
Err(Error::CustomError("too many/few components".to_owned()))
} else {
// check if local name is valid
let local = components[0];
let domain = components[1];
if local
.chars()
.all(|x| x.is_alphanumeric() || x == '_' || x == '-' || x == '.')
{
if domain
.chars()
.all(|x| x.is_alphanumeric() || x == '-' || x == '.')
{
Ok(Nip05Name {
local: local.to_owned(),
domain: domain.to_owned(),
})
} else {
Err(Error::CustomError(
"invalid character in domain part".to_owned(),
))
}
} else {
Err(Error::CustomError(
"invalid character in local part".to_owned(),
))
}
}
}
}
impl std::fmt::Display for Nip05Name {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}@{}", self.local, self.domain)
}
}
// Current time, with a slight forward jitter in seconds
fn now_jitter(sec: u64) -> u64 {
// random time between now, and 10min in future.
let mut rng = rand::thread_rng();
let jitter_amount = rng.gen_range(0..sec);
let now = unix_time();
now.saturating_add(jitter_amount)
}
/// Check if the specified username and address are present and match in this response body
fn body_contains_user(username: &str, address: &str, bytes: hyper::body::Bytes) -> Result<bool> {
// convert the body into json
let body: serde_json::Value = serde_json::from_slice(&bytes)?;
// ensure we have a names object.
let names_map = body
.as_object()
.and_then(|x| x.get("names"))
.and_then(|x| x.as_object())
.ok_or_else(|| Error::CustomError("not a map".to_owned()))?;
// get the pubkey for the requested user
let check_name = names_map.get(username).and_then(|x| x.as_str());
// ensure the address is a match
Ok(check_name.map(|x| x == address).unwrap_or(false))
}
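A response body that verifies the name bob would look like the following (per NIP-05; the value is a placeholder for the author's 64-character hex pubkey):

{
  "names": {
    "bob": "<64-character-hex-pubkey>"
  }
}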
impl Verifier {
pub fn new(
metadata_rx: tokio::sync::broadcast::Receiver<Event>,
event_tx: tokio::sync::broadcast::Sender<Event>,
) -> Result<Self> {
info!("creating NIP-05 verifier");
// build a database connection for reading and writing.
let write_pool = db::build_pool(
"nip05 writer",
rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE,
1, // min conns
4, // max conns
true, // wait for DB
);
let read_pool = db::build_pool(
"nip05 reader",
rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY,
1, // min conns
8, // max conns
true, // wait for DB
);
// setup hyper client
let https = HttpsConnector::new();
let client = Client::builder().build::<_, hyper::Body>(https);
// After all accounts have been re-verified, don't check again
// for this long.
let wait_after_finish = Duration::from_secs(60 * 10);
// when we have an active queue of accounts to validate, we
// will wait this duration between HTTP requests.
let http_wait_duration = Duration::from_secs(1);
// setup initial interval for re-verification. If we find
// there is no work to be done, it will be reset to a longer
// duration.
let reverify_interval = tokio::time::interval(http_wait_duration);
Ok(Verifier {
metadata_rx,
event_tx,
read_pool,
write_pool,
client,
wait_after_finish,
http_wait_duration,
reverify_interval,
})
}
/// Perform web verification against a NIP-05 name and address.
pub async fn get_web_verification(
&mut self,
nip: &Nip05Name,
pubkey: &str,
) -> UserWebVerificationStatus {
self.get_web_verification_res(nip, pubkey)
.await
.unwrap_or(UserWebVerificationStatus::Unknown)
}
/// Perform web verification against an `Event` (must be metadata).
pub async fn get_web_verification_from_event(
&mut self,
e: &Event,
) -> UserWebVerificationStatus {
let nip_parse = e.get_nip05_addr();
if let Some(nip) = nip_parse {
self.get_web_verification_res(&nip, &e.pubkey)
.await
.unwrap_or(UserWebVerificationStatus::Unknown)
} else {
UserWebVerificationStatus::Unknown
}
}
/// Perform web verification, with a `Result` return.
async fn get_web_verification_res(
&mut self,
nip: &Nip05Name,
pubkey: &str,
) -> Result<UserWebVerificationStatus> {
// determine if this domain should be checked
if !is_domain_allowed(&nip.domain) {
return Ok(UserWebVerificationStatus::DomainNotAllowed);
}
let url = nip
.to_url()
.ok_or_else(|| Error::CustomError("invalid NIP-05 URL".to_owned()))?;
let req = hyper::Request::builder()
.method(hyper::Method::GET)
.uri(url)
.header("Accept", "application/json")
.header(
"User-Agent",
format!(
"nostr-rs-relay/{} NIP-05 Verifier",
crate::info::CARGO_PKG_VERSION.unwrap()
),
)
.body(hyper::Body::empty())
.expect("request builder");
let response_fut = self.client.request(req);
// HTTP request with timeout
match tokio::time::timeout(Duration::from_secs(5), response_fut).await {
Ok(response_res) => {
let response = response_res?;
// limit size of verification document to 1MB.
const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024 * 1024;
// determine content length from response
let response_content_length = match response.body().size_hint().upper() {
Some(v) => v,
None => MAX_ALLOWED_RESPONSE_SIZE + 1, // reject missing content length
};
// TODO: test how hyper handles the client providing an inaccurate content-length.
if response_content_length <= MAX_ALLOWED_RESPONSE_SIZE {
let (parts, body) = response.into_parts();
// TODO: consider redirects
if parts.status == http::StatusCode::OK {
// parse body, determine if the username / key / address is present
let body_bytes = hyper::body::to_bytes(body).await?;
let body_matches = body_contains_user(&nip.local, pubkey, body_bytes)?;
if body_matches {
return Ok(UserWebVerificationStatus::Verified);
} else {
// successful response, parsed as a nip-05
// document, but this name/pubkey was not
// present.
return Ok(UserWebVerificationStatus::Unverified);
}
}
} else {
info!(
"content length missing or exceeded limits for account: {:?}",
nip.to_string()
);
}
}
Err(_) => {
info!("timeout verifying account {:?}", nip);
return Ok(UserWebVerificationStatus::Unknown);
}
}
Ok(UserWebVerificationStatus::Unknown)
}
/// Perform NIP-05 verifier tasks.
pub async fn run(&mut self) {
// use this to schedule periodic re-validation tasks
// run a loop, restarting on failure
loop {
let res = self.run_internal().await;
if let Err(e) = res {
info!("error in verifier: {:?}", e);
}
}
}
/// Internal select loop for performing verification
async fn run_internal(&mut self) -> Result<()> {
tokio::select! {
m = self.metadata_rx.recv() => {
match m {
Ok(e) => {
if let Some(naddr) = e.get_nip05_addr() {
info!("got metadata event for ({:?},{:?})", naddr.to_string() ,e.get_author_prefix());
// Process a new author, checking if they are verified:
let check_verified = get_latest_user_verification(self.read_pool.get().expect("could not get connection"), &e.pubkey).await;
// ensure the event we got is more recent than the one we have, otherwise we can ignore it.
if let Ok(last_check) = check_verified {
if e.created_at <= last_check.event_created {
// this metadata is from the same author as an existing verification.
// it is older than what we have, so we can ignore it.
debug!("received older metadata event for author {:?}", e.get_author_prefix());
return Ok(());
}
}
// old, or no existing record for this user. In either case, we just create a new one.
let start = Instant::now();
let v = self.get_web_verification_from_event(&e).await;
info!(
"checked name {:?}, result: {:?}, in: {:?}",
naddr.to_string(),
v,
start.elapsed()
);
// sleep to limit how frequently we make HTTP requests for new metadata events. This should limit us to 4 req/sec.
tokio::time::sleep(Duration::from_millis(250)).await;
// if this user was verified, we need to write the
// record, persist the event, and broadcast.
if let UserWebVerificationStatus::Verified = v {
self.create_new_verified_user(&naddr.to_string(), &e).await?;
}
}
},
Err(tokio::sync::broadcast::error::RecvError::Lagged(c)) => {
warn!("incoming metadata events overwhelmed buffer, {} events dropped",c);
}
Err(tokio::sync::broadcast::error::RecvError::Closed) => {
info!("metadata broadcast channel closed");
}
}
},
_ = self.reverify_interval.tick() => {
// check and see if there is an old account that needs
// to be reverified
self.do_reverify().await?;
},
}
Ok(())
}
/// Reverify the oldest user verification record.
async fn do_reverify(&mut self) -> Result<()> {
let reverify_setting;
let max_failures;
{
// this block prevents a read handle to settings being
// captured by the async DB call (guard is not Send)
let settings = SETTINGS.read().unwrap();
reverify_setting = settings.verified_users.verify_update_frequency_duration;
max_failures = settings.verified_users.max_consecutive_failures;
}
// get from settings, but default to 6hrs between re-checking an account
let reverify_dur = reverify_setting.unwrap_or_else(|| Duration::from_secs(60 * 60 * 6));
// find the oldest verification record whose last success or failure is OLDER than reverify_dur.
let now = SystemTime::now();
let earliest = now - reverify_dur;
let earliest_epoch = earliest
.duration_since(SystemTime::UNIX_EPOCH)
.map(|x| x.as_secs())
.unwrap_or(0);
let vr = get_oldest_user_verification(self.read_pool.get()?, earliest_epoch).await;
match vr {
Ok(ref v) => {
let new_status = self.get_web_verification(&v.name, &v.address).await;
match new_status {
UserWebVerificationStatus::Verified => {
// freshly verified account, update the
// timestamp.
self.update_verification_record(self.write_pool.get()?, v)
.await?;
}
UserWebVerificationStatus::DomainNotAllowed
| UserWebVerificationStatus::Unknown => {
// server may be offline, or temporarily
// blocked by the config file. Note the
// failure so we can process something
// else.
// have we had enough failures to give up?
if v.failure_count >= max_failures as u64 {
info!(
"giving up on verifying {:?} after {} failures",
v.name, v.failure_count
);
self.delete_verification_record(self.write_pool.get()?, v)
.await?;
} else {
// record normal failure, incrementing failure count
self.fail_verification_record(self.write_pool.get()?, v)
.await?;
}
}
UserWebVerificationStatus::Unverified => {
// domain has removed the verification, drop
// the record on our side.
self.delete_verification_record(self.write_pool.get()?, v)
.await?;
}
}
}
Err(Error::SqlError(rusqlite::Error::QueryReturnedNoRows)) => {
// No users need verification. Reset the interval to
// the next verification attempt.
let start = tokio::time::Instant::now() + self.wait_after_finish;
self.reverify_interval = tokio::time::interval_at(start, self.http_wait_duration);
}
Err(ref e) => {
warn!(
"Error when checking for NIP-05 verification records: {:?}",
e
);
}
}
Ok(())
}
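// Outcome summary for a single do_reverify() pass, as implemented above:
//   Verified                   -> verified_at refreshed, failure_count reset to 0
//   Unknown / DomainNotAllowed -> failure recorded, record deleted after max_failures
//   Unverified                 -> record deleted immediately (domain dropped the name)
//   QueryReturnedNoRows        -> nothing is due yet; the interval is pushed back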
/// Reset the verification timestamp on a VerificationRecord
pub async fn update_verification_record(
&mut self,
mut conn: db::PooledConnection,
vr: &VerificationRecord,
) -> Result<()> {
let vr_id = vr.rowid;
let vr_str = vr.to_string();
tokio::task::spawn_blocking(move || {
// add some jitter to the verification to prevent everything from stacking up together.
let verif_time = now_jitter(600);
let tx = conn.transaction()?;
{
// update verification time and reset any failure count
let query =
"UPDATE user_verification SET verified_at=?, failure_count=0 WHERE id=?";
let mut stmt = tx.prepare(query)?;
stmt.execute(params![verif_time, vr_id])?;
}
tx.commit()?;
info!("verification updated for {}", vr_str);
let ok: Result<()> = Ok(());
ok
})
.await?
}
/// Record a failed attempt: update the failure timestamp and increment the failure count on a VerificationRecord
pub async fn fail_verification_record(
&mut self,
mut conn: db::PooledConnection,
vr: &VerificationRecord,
) -> Result<()> {
let vr_id = vr.rowid;
let vr_str = vr.to_string();
let fail_count = vr.failure_count.saturating_add(1);
tokio::task::spawn_blocking(move || {
// add some jitter to the verification to prevent everything from stacking up together.
let fail_time = now_jitter(600);
let tx = conn.transaction()?;
{
let query = "UPDATE user_verification SET failed_at=?, failure_count=? WHERE id=?";
let mut stmt = tx.prepare(query)?;
stmt.execute(params![fail_time, fail_count, vr_id])?;
}
tx.commit()?;
info!("verification failed for {}", vr_str);
let ok: Result<()> = Ok(());
ok
})
.await?
}
/// Delete a VerificationRecord that is no longer valid
pub async fn delete_verification_record(
&mut self,
mut conn: db::PooledConnection,
vr: &VerificationRecord,
) -> Result<()> {
let vr_id = vr.rowid;
let vr_str = vr.to_string();
tokio::task::spawn_blocking(move || {
let tx = conn.transaction()?;
{
let query = "DELETE FROM user_verification WHERE id=?;";
let mut stmt = tx.prepare(query)?;
stmt.execute(params![vr_id])?;
}
tx.commit()?;
info!("verification rescinded for {}", vr_str);
let ok: Result<()> = Ok(());
ok
})
.await?
}
/// Persist an event, create a verification record, and broadcast.
// TODO: have more event-writing logic handled in the db module.
// Right now, these events avoid the rate limit. That is
// acceptable since as soon as the user is registered, this path
// is no longer used.
// TODO: refactor these into spawn_blocking
// calls to get them off the async executors.
async fn create_new_verified_user(&mut self, name: &str, event: &Event) -> Result<()> {
let start = Instant::now();
// we should only do this if we are enabled. if we are
// disabled/passive, the event has already been persisted.
let should_write_event;
{
let settings = SETTINGS.read().unwrap();
should_write_event = settings.verified_users.is_enabled()
}
if should_write_event {
match db::write_event(&mut self.write_pool.get()?, event) {
Ok(updated) => {
if updated != 0 {
info!(
"persisted event: {:?} in {:?}",
event.get_event_id_prefix(),
start.elapsed()
);
self.event_tx.send(event.clone()).ok();
}
}
Err(err) => {
warn!("event insert failed: {:?}", err);
if let Error::SqlError(r) = err {
warn!("because: : {:?}", r);
}
}
}
}
// write the verification record
save_verification_record(self.write_pool.get()?, event, name).await?;
Ok(())
}
}
/// Result of checking user's verification status against DNS/HTTP.
#[derive(PartialEq, Debug, Clone)]
pub enum UserWebVerificationStatus {
Verified, // user is verified, as of now.
DomainNotAllowed, // domain blacklist or whitelist prevented us from attempting verification
Unknown, // user's status could not be determined (timeout, server error)
Unverified, // user's status is not verified (successful check, name / addr do not match)
}
/// A NIP-05 verification record.
#[derive(PartialEq, Debug, Clone)]
// Basic information for a verification event. Gives us all we need to assert a NIP-05 address is good.
pub struct VerificationRecord {
pub rowid: u64, // database row for this verification event
pub name: Nip05Name, // address being verified
pub address: String, // pubkey
pub event: String, // event ID hash providing the verification
pub event_created: u64, // when the metadata event was published
pub last_success: Option<u64>, // the most recent time a verification was provided. None if verification under this name has never succeeded.
pub last_failure: Option<u64>, // the most recent time verification was attempted, but could not be completed.
pub failure_count: u64, // how many consecutive failures have been observed.
}
/// Check with settings to determine if a given domain is allowed to
/// publish.
pub fn is_domain_allowed(domain: &str) -> bool {
let settings = SETTINGS.read().unwrap();
// if there is a whitelist, domain must be present in it.
if let Some(wl) = &settings.verified_users.domain_whitelist {
// workaround for Vec contains not accepting &str
return wl.iter().any(|x| x == domain);
}
// otherwise, check that the domain is not in the blacklist
if let Some(bl) = &settings.verified_users.domain_blacklist {
return !bl.iter().any(|x| x == domain);
}
true
}
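// Example with hypothetical settings: if domain_whitelist = ["example.com"],
// only "example.com" may publish; if no whitelist is set and
// domain_blacklist = ["evil.example"], everything except "evil.example" is
// allowed; with neither list configured, all domains are allowed.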
impl VerificationRecord {
/// Check if the record is recent enough to be considered valid,
/// and the domain is allowed.
pub fn is_valid(&self) -> bool {
let settings = SETTINGS.read().unwrap();
// how long a verification record is good for
let nip05_expiration = &settings.verified_users.verify_expiration_duration;
if let Some(e) = nip05_expiration {
if !self.is_current(e) {
return false;
}
}
// check domains
is_domain_allowed(&self.name.domain)
}
/// Check if this record has been validated within the given
/// duration.
fn is_current(&self, d: &Duration) -> bool {
match self.last_success {
Some(s) => {
// current time - duration
let now = SystemTime::now();
let cutoff = now - *d;
let cutoff_epoch = cutoff
.duration_since(SystemTime::UNIX_EPOCH)
.map(|x| x.as_secs())
.unwrap_or(0);
s > cutoff_epoch
}
None => false,
}
}
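// Worked example: with an expiration duration of 6 hours, a record whose
// last_success is 2 hours old satisfies s > (now - 6h) and is current; one
// verified 7 hours ago is stale, and a record that has never succeeded
// (last_success == None) is never current.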
}
impl std::fmt::Display for VerificationRecord {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"({:?},{:?})",
self.name.to_string(),
self.address.chars().take(8).collect::<String>()
)
}
}
/// Create a new verification record based on an event
pub async fn save_verification_record(
mut conn: db::PooledConnection,
event: &Event,
name: &str,
) -> Result<()> {
let e = hex::decode(&event.id).ok();
let n = name.to_owned();
let a_prefix = event.get_author_prefix();
tokio::task::spawn_blocking(move || {
let tx = conn.transaction()?;
{
// if we create a /new/ one, we should get rid of any old ones. or group the new ones by name and only consider the latest.
let query = "INSERT INTO user_verification (metadata_event, name, verified_at) VALUES ((SELECT id from event WHERE event_hash=?), ?, strftime('%s','now'));";
let mut stmt = tx.prepare(query)?;
stmt.execute(params![e, n])?;
// get the row ID
let v_id = tx.last_insert_rowid();
// delete everything else by this name
let del_query = "DELETE FROM user_verification WHERE name = ? AND id != ?;";
let mut del_stmt = tx.prepare(del_query)?;
let count = del_stmt.execute(params![n,v_id])?;
if count > 0 {
info!("removed {} old verification records for ({:?},{:?})", count, n, a_prefix);
}
}
tx.commit()?;
info!("saved new verification record for ({:?},{:?})", n, a_prefix);
let ok: Result<()> = Ok(());
ok
}).await?
}
/// Retrieve the most recent verification record for a given pubkey (async).
pub async fn get_latest_user_verification(
conn: db::PooledConnection,
pubkey: &str,
) -> Result<VerificationRecord> {
let p = pubkey.to_owned();
tokio::task::spawn_blocking(move || query_latest_user_verification(conn, p)).await?
}
/// Query database for the latest verification record for a given pubkey.
pub fn query_latest_user_verification(
mut conn: db::PooledConnection,
pubkey: String,
) -> Result<VerificationRecord> {
let tx = conn.transaction()?;
let query = "SELECT v.id, v.name, e.event_hash, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v LEFT JOIN event e ON e.id=v.metadata_event WHERE e.author=? ORDER BY e.created_at DESC, v.verified_at DESC, v.failed_at DESC LIMIT 1;";
let mut stmt = tx.prepare_cached(query)?;
let fields = stmt.query_row(params![hex::decode(&pubkey).ok()], |r| {
let rowid: u64 = r.get(0)?;
let rowname: String = r.get(1)?;
let eventid: Vec<u8> = r.get(2)?;
let created_at: u64 = r.get(3)?;
// create a tuple since we can't throw non-rusqlite errors in this closure
Ok((
rowid,
rowname,
eventid,
created_at,
r.get(4).ok(),
r.get(5).ok(),
r.get(6)?,
))
})?;
Ok(VerificationRecord {
rowid: fields.0,
name: Nip05Name::try_from(&fields.1[..])?,
address: pubkey,
event: hex::encode(fields.2),
event_created: fields.3,
last_success: fields.4,
last_failure: fields.5,
failure_count: fields.6,
})
}
/// Retrieve the oldest user verification (async)
pub async fn get_oldest_user_verification(
conn: db::PooledConnection,
earliest: u64,
) -> Result<VerificationRecord> {
tokio::task::spawn_blocking(move || query_oldest_user_verification(conn, earliest)).await?
}
pub fn query_oldest_user_verification(
mut conn: db::PooledConnection,
earliest: u64,
) -> Result<VerificationRecord> {
let tx = conn.transaction()?;
let query = "SELECT v.id, v.name, e.event_hash, e.author, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v LEFT JOIN event e ON e.id=v.metadata_event WHERE (v.verified_at < ? OR v.verified_at IS NULL) AND (v.failed_at < ? OR v.failed_at IS NULL) ORDER BY v.verified_at ASC, v.failed_at ASC LIMIT 1;";
let mut stmt = tx.prepare_cached(query)?;
let fields = stmt.query_row(params![earliest, earliest], |r| {
let rowid: u64 = r.get(0)?;
let rowname: String = r.get(1)?;
let eventid: Vec<u8> = r.get(2)?;
let pubkey: Vec<u8> = r.get(3)?;
let created_at: u64 = r.get(4)?;
// create a tuple since we can't throw non-rusqlite errors in this closure
Ok((
rowid,
rowname,
eventid,
pubkey,
created_at,
r.get(5).ok(),
r.get(6).ok(),
r.get(7)?,
))
})?;
let vr = VerificationRecord {
rowid: fields.0,
name: Nip05Name::try_from(&fields.1[..])?,
address: hex::encode(fields.3),
event: hex::encode(fields.2),
event_created: fields.4,
last_success: fields.5,
last_failure: fields.6,
failure_count: fields.7,
};
Ok(vr)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn local_from_inet() {
let addr = "bob@example.com";
let parsed = Nip05Name::try_from(addr);
assert!(parsed.is_ok());
let v = parsed.unwrap();
assert_eq!(v.local, "bob");
assert_eq!(v.domain, "example.com");
}
#[test]
fn not_enough_sep() {
let addr = "bob_example.com";
let parsed = Nip05Name::try_from(addr);
assert!(parsed.is_err());
}
#[test]
fn too_many_sep() {
let addr = "foo@bob@example.com";
let parsed = Nip05Name::try_from(addr);
assert!(parsed.is_err());
}
#[test]
fn invalid_local_name() {
// non-permitted ascii chars
assert!(Nip05Name::try_from("foo!@example.com").is_err());
assert!(Nip05Name::try_from("foo @example.com").is_err());
assert!(Nip05Name::try_from(" foo@example.com").is_err());
assert!(Nip05Name::try_from("f oo@example.com").is_err());
assert!(Nip05Name::try_from("foo<@example.com").is_err());
// unicode dash
assert!(Nip05Name::try_from("foobar@example.com").is_err());
// emoji
assert!(Nip05Name::try_from("foo😭bar@example.com").is_err());
}
#[test]
fn invalid_domain_name() {
// non-permitted ascii chars
assert!(Nip05Name::try_from("foo@examp!e.com").is_err());
assert!(Nip05Name::try_from("foo@ example.com").is_err());
assert!(Nip05Name::try_from("foo@exa mple.com").is_err());
assert!(Nip05Name::try_from("foo@example .com").is_err());
assert!(Nip05Name::try_from("foo@exa<mple.com").is_err());
// unicode dash
assert!(Nip05Name::try_from("foobar@example.com").is_err());
// emoji
assert!(Nip05Name::try_from("foobar@ex😭ample.com").is_err());
}
#[test]
fn to_url() {
let nip = Nip05Name::try_from("foobar@example.com").unwrap();
assert_eq!(
nip.to_url(),
Some(
"https://example.com/.well-known/nostr.json?name=foobar"
.parse()
.unwrap()
)
);
}
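// Hedged addition (not in the original suite): NIP-05 allows ASCII
// dash/underscore/dot in local names, so a hyphenated address is assumed to parse.
#[test]
fn hyphenated_name_ok() {
let parsed = Nip05Name::try_from("foo-bar@example.com");
assert!(parsed.is_ok());
}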
}

src/protostream.rs (deleted)

@@ -1,132 +0,0 @@
//! Nostr protocol layered over WebSocket
use crate::close::CloseCmd;
use crate::config;
use crate::error::{Error, Result};
use crate::event::EventCmd;
use crate::subscription::Subscription;
use core::pin::Pin;
use futures::sink::Sink;
use futures::stream::Stream;
use futures::task::Context;
use futures::task::Poll;
use hyper::upgrade::Upgraded;
use log::*;
use serde::{Deserialize, Serialize};
use tokio_tungstenite::WebSocketStream;
use tungstenite::error::Error as WsError;
use tungstenite::protocol::Message;
/// Nostr protocol messages from a client
#[derive(Deserialize, Serialize, Clone, PartialEq, Debug)]
#[serde(untagged)]
pub enum NostrMessage {
/// An `EVENT` message
EventMsg(EventCmd),
/// A `REQ` message
SubMsg(Subscription),
/// A `CLOSE` message
CloseMsg(CloseCmd),
}
/// Nostr protocol messages from a relay/server
#[derive(Deserialize, Serialize, Clone, PartialEq, Debug)]
pub enum NostrResponse {
/// A `NOTICE` response
NoticeRes(String),
/// An `EVENT` response, composed of the subscription identifier,
/// and serialized event JSON
EventRes(String, String),
}
/// A Nostr protocol stream is layered on top of a Websocket stream.
pub struct NostrStream {
ws_stream: WebSocketStream<Upgraded>,
}
/// Given a websocket, return a protocol stream wrapper.
pub fn wrap_ws_in_nostr(ws: WebSocketStream<Upgraded>) -> NostrStream {
NostrStream { ws_stream: ws }
}
/// Implement the [`Stream`] interface to produce Nostr messages.
impl Stream for NostrStream {
type Item = Result<NostrMessage>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
// get the configuration
/// Convert Message to NostrMessage
fn convert(msg: String) -> Result<NostrMessage> {
let config = config::SETTINGS.read().unwrap();
let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
match parsed_res {
Ok(m) => {
if let NostrMessage::EventMsg(_) = m {
if let Some(max_size) = config.limits.max_event_bytes {
// check length, ensure that some max size is set.
if msg.len() > max_size && max_size > 0 {
return Err(Error::EventMaxLengthError(msg.len()));
}
}
}
Ok(m)
}
Err(e) => {
debug!("proto parse error: {:?}", e);
Err(Error::ProtoParseError)
}
}
}
match Pin::new(&mut self.ws_stream).poll_next(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(None) => Poll::Ready(None),
Poll::Ready(Some(v)) => match v {
Ok(Message::Text(vs)) => Poll::Ready(Some(convert(vs))),
Ok(Message::Binary(_)) => Poll::Ready(Some(Err(Error::ProtoParseError))),
Ok(Message::Pong(_)) | Ok(Message::Ping(_)) => Poll::Pending,
Ok(Message::Close(_)) => Poll::Ready(None),
Err(WsError::AlreadyClosed) | Err(WsError::ConnectionClosed) => Poll::Ready(None),
Err(_) => Poll::Ready(Some(Err(Error::ConnError))),
},
}
}
}
/// Implement the [`Sink`] interface to produce Nostr responses.
impl Sink<NostrResponse> for NostrStream {
type Error = Error;
fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
// map the error type
match Pin::new(&mut self.ws_stream).poll_ready(cx) {
Poll::Ready(Ok(())) => Poll::Ready(Ok(())),
Poll::Ready(Err(_)) => Poll::Ready(Err(Error::ConnWriteError)),
Poll::Pending => Poll::Pending,
}
}
fn start_send(mut self: Pin<&mut Self>, item: NostrResponse) -> Result<(), Self::Error> {
// TODO: do real escaping for these - at least on NOTICE,
// which surely has some problems if arbitrary text is sent.
let send_str = match item {
NostrResponse::NoticeRes(msg) => {
let s = msg.replace("\"", "");
format!("[\"NOTICE\",\"{}\"]", s)
}
NostrResponse::EventRes(sub, eventstr) => {
let subesc = sub.replace("\"", "");
format!("[\"EVENT\",\"{}\",{}]", subesc, eventstr)
}
};
match Pin::new(&mut self.ws_stream).start_send(Message::Text(send_str)) {
Ok(()) => Ok(()),
Err(_) => Err(Error::ConnWriteError),
}
}
fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
}

src/schema.rs (new file, 237 lines)

@@ -0,0 +1,237 @@
//! Database schema and migrations
use crate::db::PooledConnection;
use crate::error::Result;
use crate::utils::is_hex;
use log::*;
use rusqlite::limits::Limit;
use rusqlite::params;
use rusqlite::Connection;
// TODO: drop the pubkey_ref and event_ref tables
/// Startup DB Pragmas
pub const STARTUP_SQL: &str = r##"
PRAGMA main.synchronous=NORMAL;
PRAGMA foreign_keys = ON;
pragma mmap_size = 536870912; -- 512MB of mmap
"##;
/// Schema definition
const INIT_SQL: &str = r##"
-- Database settings
PRAGMA encoding = "UTF-8";
PRAGMA journal_mode=WAL;
PRAGMA main.synchronous=NORMAL;
PRAGMA foreign_keys = ON;
PRAGMA application_id = 1654008667;
PRAGMA user_version = 5;
-- Event Table
CREATE TABLE IF NOT EXISTS event (
id INTEGER PRIMARY KEY,
event_hash BLOB NOT NULL, -- 32-byte SHA-256 hash of the serialized event
first_seen INTEGER NOT NULL, -- when the event was first seen (not authored!) (seconds since 1970)
created_at INTEGER NOT NULL, -- when the event was authored
author BLOB NOT NULL, -- author pubkey
kind INTEGER NOT NULL, -- event kind
hidden INTEGER, -- relevant for queries
content TEXT NOT NULL -- serialized json of event object
);
-- Event Indexes
CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash);
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS author_index ON event(author);
CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
-- Tag Table
-- Tag values are stored as either a BLOB (if they come in as a
-- hex-string), or TEXT otherwise.
-- This means that searches need to select the appropriate column.
CREATE TABLE IF NOT EXISTS tag (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
name TEXT, -- the tag name ("p", "e", whatever)
value TEXT, -- the tag value, if not hex.
value_hex BLOB, -- the tag value, if it can be interpreted as a hex string.
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
CREATE INDEX IF NOT EXISTS tag_val_hex_index ON tag(value_hex);
-- NIP-05 User Validation
CREATE TABLE IF NOT EXISTS user_verification (
id INTEGER PRIMARY KEY,
metadata_event INTEGER NOT NULL, -- the metadata event used for this validation.
name TEXT NOT NULL, -- the nip05 field value (user@domain).
verified_at INTEGER, -- timestamp this author/nip05 was most recently verified.
failed_at INTEGER, -- timestamp a verification attempt failed (host down).
failure_count INTEGER DEFAULT 0, -- number of consecutive failures.
FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name);
CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);
"##;
/// Determine the current application database schema version.
pub fn db_version(conn: &mut Connection) -> Result<usize> {
let query = "PRAGMA user_version;";
let curr_version = conn.query_row(query, [], |row| row.get(0))?;
Ok(curr_version)
}
/// Upgrade DB to latest version, and execute pragma settings
pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
// check the version.
let mut curr_version = db_version(conn)?;
info!("DB version = {:?}", curr_version);
debug!(
"SQLite max query parameters: {}",
conn.limit(Limit::SQLITE_LIMIT_VARIABLE_NUMBER)
);
debug!(
"SQLite max table/blob/text length: {} MB",
(conn.limit(Limit::SQLITE_LIMIT_LENGTH) as f64 / (1024 * 1024) as f64).floor()
);
debug!(
"SQLite max SQL length: {} MB",
(conn.limit(Limit::SQLITE_LIMIT_SQL_LENGTH) as f64 / (1024 * 1024) as f64).floor()
);
// initialize from scratch
if curr_version == 0 {
match conn.execute_batch(INIT_SQL) {
Ok(()) => {
info!("database pragma/schema initialized to v4, and ready");
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be initialized");
}
}
}
if curr_version == 1 {
// only change is adding a hidden column to events.
let upgrade_sql = r##"
ALTER TABLE event ADD hidden INTEGER;
UPDATE event SET hidden=FALSE;
PRAGMA user_version = 2;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v1 -> v2");
curr_version = 2;
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
}
if curr_version == 2 {
// this version lacks the tag column
info!("database schema needs update from 2->3");
let upgrade_sql = r##"
CREATE TABLE IF NOT EXISTS tag (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
name TEXT, -- the tag name ("p", "e", whatever)
value TEXT, -- the tag value, if not hex.
value_hex BLOB, -- the tag value, if it can be interpreted as a hex string.
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
PRAGMA user_version = 3;
"##;
// TODO: load existing refs into tag table
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v2 -> v3");
curr_version = 3;
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
info!("Starting transaction");
// iterate over every event/pubkey tag
let tx = conn.transaction()?;
{
let mut stmt = tx.prepare("select event_id, \"e\", lower(hex(referenced_event)) from event_ref union select event_id, \"p\", lower(hex(referenced_pubkey)) from pubkey_ref;")?;
let mut tag_rows = stmt.query([])?;
while let Some(row) = tag_rows.next()? {
// we want to capture the event_id that had the tag, the tag name, and the tag hex value.
let event_id: u64 = row.get(0)?;
let tag_name: String = row.get(1)?;
let tag_value: String = row.get(2)?;
// this will leave behind p/e tags that were non-hex, but they are invalid anyways.
if is_hex(&tag_value) {
tx.execute(
"INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
params![event_id, tag_name, hex::decode(&tag_value).ok()],
)?;
}
}
}
tx.commit()?;
info!("Upgrade complete");
}
if curr_version == 3 {
info!("database schema needs update from 3->4");
let upgrade_sql = r##"
-- incoming metadata events with nip05
CREATE TABLE IF NOT EXISTS user_verification (
id INTEGER PRIMARY KEY,
metadata_event INTEGER NOT NULL, -- the metadata event used for this validation.
name TEXT NOT NULL, -- the nip05 field value (user@domain).
verified_at INTEGER, -- timestamp this author/nip05 was most recently verified.
failed_at INTEGER, -- timestamp a verification attempt failed (host down).
failure_count INTEGER DEFAULT 0, -- number of consecutive failures.
FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name);
CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);
PRAGMA user_version = 4;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v3 -> v4");
curr_version = 4;
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
}
if curr_version == 4 {
info!("database schema needs update from 4->5");
let upgrade_sql = r##"
DROP TABLE IF EXISTS event_ref;
DROP TABLE IF EXISTS pubkey_ref;
PRAGMA user_version=5;
"##;
match conn.execute_batch(upgrade_sql) {
Ok(()) => {
info!("database schema upgraded v4 -> v5");
// uncomment if we have a newer version
//curr_version = 5;
}
Err(err) => {
error!("update failed: {}", err);
panic!("database could not be upgraded");
}
}
} else if curr_version == 5 {
debug!("Database version was already current");
} else if curr_version > 5 {
panic!("Database version is newer than supported by this executable");
}
// Setup PRAGMA
conn.execute_batch(STARTUP_SQL)?;
debug!("SQLite PRAGMA startup completed");
Ok(())
}
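// Typical startup usage (a sketch; the actual call site is outside this file):
//   let mut conn: PooledConnection = pool.get()?;
//   schema::upgrade_db(&mut conn)?;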

src/subscription.rs (modified)

@@ -1,7 +1,10 @@
 //! Subscription and filter parsing
 use crate::error::Result;
 use crate::event::Event;
+use serde::de::Unexpected;
 use serde::{Deserialize, Deserializer, Serialize};
+use serde_json::Value;
+use std::collections::HashMap;
 use std::collections::HashSet;
 /// Subscription identifier and set of request filters
@@ -16,24 +19,76 @@ pub struct Subscription {
 /// Corresponds to client-provided subscription request elements. Any
 /// element can be present if it should be used in filtering, or
 /// absent ([`None`]) if it should be ignored.
-#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
+#[derive(Serialize, PartialEq, Debug, Clone)]
 pub struct ReqFilter {
 /// Event hashes
 pub ids: Option<Vec<String>>,
 /// Event kinds
 pub kinds: Option<Vec<u64>>,
-/// Referenced event hash
-#[serde(rename = "#e")]
-pub events: Option<Vec<String>>,
-/// Referenced public key for a petname
-#[serde(rename = "#p")]
-pub pubkeys: Option<Vec<String>>,
 /// Events published after this time
 pub since: Option<u64>,
 /// Events published before this time
 pub until: Option<u64>,
 /// List of author public keys
 pub authors: Option<Vec<String>>,
+/// Set of tags
+#[serde(skip)]
+pub tags: Option<HashMap<String, HashSet<String>>>,
 }
+impl<'de> Deserialize<'de> for ReqFilter {
+fn deserialize<D>(deserializer: D) -> Result<ReqFilter, D::Error>
+where
+D: Deserializer<'de>,
+{
+let received: Value = Deserialize::deserialize(deserializer)?;
+let filter = received.as_object().ok_or_else(|| {
+serde::de::Error::invalid_type(
+Unexpected::Other("reqfilter is not an object"),
+&"a json object",
+)
+})?;
+let mut rf = ReqFilter {
+ids: None,
+kinds: None,
+since: None,
+until: None,
+authors: None,
+tags: None,
+};
+let mut ts = None;
+// iterate through each key, and assign values that exist
+for (key, val) in filter.into_iter() {
+// ids
+if key == "ids" {
+rf.ids = Deserialize::deserialize(val).ok();
+} else if key == "kinds" {
+rf.kinds = Deserialize::deserialize(val).ok();
+} else if key == "since" {
+rf.since = Deserialize::deserialize(val).ok();
+} else if key == "until" {
+rf.until = Deserialize::deserialize(val).ok();
+} else if key == "authors" {
+rf.authors = Deserialize::deserialize(val).ok();
+} else if key.starts_with('#') && key.len() > 1 && val.is_array() {
+// remove the prefix
+let tagname = &key[1..];
+if ts.is_none() {
+// Initialize the tag if necessary
+ts = Some(HashMap::new());
+}
+if let Some(m) = ts.as_mut() {
+let tag_vals: Option<Vec<String>> = Deserialize::deserialize(val).ok();
+if let Some(v) = tag_vals {
+let hs = HashSet::from_iter(v.into_iter());
+m.insert(tagname.to_owned(), hs);
+}
+};
+}
+}
+rf.tags = ts;
+Ok(rf)
+}
+}
 impl<'de> Deserialize<'de> for Subscription {
@@ -43,7 +98,7 @@ impl<'de> Deserialize<'de> for Subscription {
 where
 D: Deserializer<'de>,
 {
-let mut v: serde_json::Value = Deserialize::deserialize(deserializer)?;
+let mut v: Value = Deserialize::deserialize(deserializer)?;
 // this should be a 3-or-more element array.
 // verify the first element is a String, REQ
 // get the subscription from the second element.
@@ -78,6 +133,7 @@ impl<'de> Deserialize<'de> for Subscription {
 for fv in i {
 let f: ReqFilter = serde_json::from_value(fv.take())
 .map_err(|_| serde::de::Error::custom("could not parse filter"))?;
+// create indexes
 filters.push(f);
 }
 Ok(Subscription {
@@ -104,50 +160,45 @@ impl Subscription {
 }
 }
+fn prefix_match(prefixes: &[String], target: &str) -> bool {
+for prefix in prefixes {
+if target.starts_with(prefix) {
+return true;
+}
+}
+// none matched
+false
+}
 impl ReqFilter {
+/// Check for a match within the authors list.
 fn ids_match(&self, event: &Event) -> bool {
 self.ids
 .as_ref()
-.map(|vs| vs.contains(&event.id.to_owned()))
+.map(|vs| prefix_match(vs, &event.id))
 .unwrap_or(true)
 }
 fn authors_match(&self, event: &Event) -> bool {
 self.authors
 .as_ref()
-.map(|vs| vs.contains(&event.pubkey.to_owned()))
+.map(|vs| prefix_match(vs, &event.pubkey))
 .unwrap_or(true)
 }
-/// Check if this filter either matches, or does not care about the event tags.
-fn event_match(&self, event: &Event) -> bool {
-// This needs to be analyzed for performance; building these
-// hash sets for each active subscription isn't great.
-if let Some(es) = &self.events {
-let event_refs =
-HashSet::<_>::from_iter(event.get_event_tags().iter().map(|x| x.to_owned()));
-let filter_refs = HashSet::<_>::from_iter(es.iter().map(|x| &x[..]));
-let cardinality = event_refs.intersection(&filter_refs).count();
-cardinality > 0
-} else {
-true
-}
-}
-/// Check if this filter either matches, or does not care about
-/// the pubkey/petname tags.
-fn pubkey_match(&self, event: &Event) -> bool {
-// This needs to be analyzed for performance; building these
-// hash sets for each active subscription isn't great.
-if let Some(ps) = &self.pubkeys {
-let pubkey_refs =
-HashSet::<_>::from_iter(event.get_pubkey_tags().iter().map(|x| x.to_owned()));
-let filter_refs = HashSet::<_>::from_iter(ps.iter().map(|x| &x[..]));
-let cardinality = pubkey_refs.intersection(&filter_refs).count();
-cardinality > 0
-} else {
-true
-}
-}
+fn tag_match(&self, event: &Event) -> bool {
+// get the hashset from the filter.
+if let Some(map) = &self.tags {
+for (key, val) in map.iter() {
+let tag_match = event.generic_tag_val_intersect(key, val);
+// if there is no match for this tag, the match fails.
+if !tag_match {
+return false;
+}
+// if there was a match, we move on to the next one.
+}
+}
+// if the tag map is empty, the match succeeds (there was no filter)
+true
+}
 /// Check if this filter either matches, or does not care about the kind.
@@ -163,10 +214,10 @@ impl ReqFilter {
 // self.id.as_ref().map(|v| v == &event.id).unwrap_or(true)
 self.ids_match(event)
 && self.since.map(|t| event.created_at > t).unwrap_or(true)
+&& self.until.map(|t| event.created_at < t).unwrap_or(true)
 && self.kind_match(event.kind)
 && self.authors_match(event)
-&& self.pubkey_match(event)
-&& self.event_match(event)
+&& self.tag_match(event)
 }
 }
@@ -197,27 +248,66 @@ mod tests {
 }
 #[test]
-fn invalid_filter() {
-// unrecognized field in filter
-let raw_json = "[\"REQ\",\"some-id\",{\"foo\": 3}]";
-assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
+fn legacy_filter() {
+// legacy field in filter
+let raw_json = "[\"REQ\",\"some-id\",{\"kind\": 3}]";
+assert!(serde_json::from_str::<Subscription>(raw_json).is_ok());
 }
 #[test]
 fn author_filter() -> Result<()> {
-let raw_json = "[\"REQ\",\"some-id\",{\"author\": \"test-author-id\"}]";
+let raw_json = r#"["REQ","some-id",{"authors": ["test-author-id"]}]"#;
 let s: Subscription = serde_json::from_str(raw_json)?;
 assert_eq!(s.id, "some-id");
 assert_eq!(s.filters.len(), 1);
 let first_filter = s.filters.get(0).unwrap();
-assert_eq!(first_filter.author, Some("test-author-id".to_owned()));
+assert_eq!(
+first_filter.authors,
+Some(vec!("test-author-id".to_owned()))
+);
+Ok(())
+}
+#[test]
+fn interest_author_prefix_match() -> Result<()> {
+// subscription with a filter for ID
+let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"authors": ["abc"]}]"#)?;
+let e = Event {
+id: "foo".to_owned(),
+pubkey: "abcd".to_owned(),
+created_at: 0,
+kind: 0,
+tags: Vec::new(),
+content: "".to_owned(),
+sig: "".to_owned(),
+tagidx: None,
+};
+assert!(s.interested_in_event(&e));
+Ok(())
+}
+#[test]
+fn interest_id_prefix_match() -> Result<()> {
+// subscription with a filter for ID
+let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"]}]"#)?;
+let e = Event {
+id: "abcd".to_owned(),
+pubkey: "".to_owned(),
+created_at: 0,
+kind: 0,
+tags: Vec::new(),
+content: "".to_owned(),
+sig: "".to_owned(),
+tagidx: None,
+};
+assert!(s.interested_in_event(&e));
 Ok(())
 }
 #[test]
 fn interest_id_nomatch() -> Result<()> {
 // subscription with a filter for ID
-let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"id":"abc"}]"#)?;
+let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"ids": ["xyz"]}]"#)?;
 let e = Event {
 id: "abcde".to_owned(),
 pubkey: "".to_owned(),
@@ -226,15 +316,17 @@ mod tests {
 tags: Vec::new(),
 content: "".to_owned(),
 sig: "".to_owned(),
+tagidx: None,
 };
-assert_eq!(s.interested_in_event(&e), false);
+assert!(!s.interested_in_event(&e));
 Ok(())
 }
 #[test]
-fn interest_time_and_id() -> Result<()> {
+fn interest_until() -> Result<()> {
 // subscription with a filter for ID and time
-let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"id":"abc", "since": 1000}]"#)?;
+let s: Subscription =
+serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "until": 1000}]"#)?;
 let e = Event {
 id: "abc".to_owned(),
 pubkey: "".to_owned(),
@@ -243,8 +335,53 @@ mod tests {
 tags: Vec::new(),
 content: "".to_owned(),
 sig: "".to_owned(),
+tagidx: None,
 };
-assert_eq!(s.interested_in_event(&e), false);
+assert!(s.interested_in_event(&e));
+Ok(())
+}
+#[test]
+fn interest_range() -> Result<()> {
+// subscription with a filter for ID and time
+let s_in: Subscription =
+serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 100, "until": 200}]"#)?;
+let s_before: Subscription =
+serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 100, "until": 140}]"#)?;
+let s_after: Subscription =
+serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 160, "until": 200}]"#)?;
+let e = Event {
+id: "abc".to_owned(),
+pubkey: "".to_owned(),
+created_at: 150,
+kind: 0,
+tags: Vec::new(),
+content: "".to_owned(),
+sig: "".to_owned(),
+tagidx: None,
+};
+assert!(s_in.interested_in_event(&e));
+assert!(!s_before.interested_in_event(&e));
+assert!(!s_after.interested_in_event(&e));
+Ok(())
+}
+#[test]
+fn interest_time_and_id() -> Result<()> {
+// subscription with a filter for ID and time
+let s: Subscription =
+serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 1000}]"#)?;
+let e = Event {
+id: "abc".to_owned(),
+pubkey: "".to_owned(),
+created_at: 50,
+kind: 0,
+tags: Vec::new(),
+content: "".to_owned(),
+sig: "".to_owned(),
+tagidx: None,
+};
+assert!(!s.interested_in_event(&e));
 Ok(())
 }
@@ -260,8 +397,9 @@ mod tests {
 tags: Vec::new(),
 content: "".to_owned(),
 sig: "".to_owned(),
+tagidx: None,
 };
-assert_eq!(s.interested_in_event(&e), true);
+assert!(s.interested_in_event(&e));
 Ok(())
 }
@@ -277,8 +415,9 @@ mod tests {
 tags: Vec::new(),
 content: "".to_owned(),
 sig: "".to_owned(),
+tagidx: None,
 };
-assert_eq!(s.interested_in_event(&e), true);
+assert!(s.interested_in_event(&e));
 Ok(())
 }
@@ -294,8 +433,9 @@ mod tests {
 tags: Vec::new(),
 content: "".to_owned(),
 sig: "".to_owned(),
+tagidx: None,
 };
-assert_eq!(s.interested_in_event(&e), true);
+assert!(s.interested_in_event(&e));
 Ok(())
 }
 #[test]
@@ -311,8 +451,9 @@ mod tests {
 tags: Vec::new(),
 content: "".to_owned(),
 sig: "".to_owned(),
+tagidx: None,
 };
-assert_eq!(s.interested_in_event(&e), true);
+assert!(s.interested_in_event(&e));
 Ok(())
 }
@@ -328,8 +469,9 @@ mod tests {
 tags: Vec::new(),
 content: "".to_owned(),
 sig: "".to_owned(),
+tagidx: None,
 };
-assert_eq!(s.interested_in_event(&e), false);
+assert!(!s.interested_in_event(&e));
 Ok(())
 }
 }

src/utils.rs (new file, 15 lines)

@@ -0,0 +1,15 @@
//! Common utility functions
use std::time::SystemTime;
/// Seconds since 1970.
pub fn unix_time() -> u64 {
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.map(|x| x.as_secs())
.unwrap_or(0)
}
/// Check if a string contains only hex characters.
pub fn is_hex(s: &str) -> bool {
s.chars().all(|x| char::is_ascii_hexdigit(&x))
}
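// Examples: is_hex("deadBEEF") == true; is_hex("0xff") == false (the 'x' is
// not a hex digit). Note the empty string is vacuously hex, since
// Iterator::all() returns true on an empty iterator.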