Mirror of https://github.com/scsibug/nostr-rs-relay.git
Synced 2025-09-01 03:40:46 -04:00

Compare commits (59 commits)
f2001dc34a
b593001229
5913b9f87a
77f35f9f43
9e06cc9482
e66fa4ac42
99e117f620
8250e00f05
c9f87ec563
ceaa01e8b4
bc68cd0c74
97589006fa
e31d0729f2
89d96e7ccd
7056aae227
753df47443
26a0ce2b32
fa66a0265e
234a8ba0ac
f679fa0893
4cc313fa2d
6502f7dcd7
6ca3e3ffea
49c668a07c
98c6fa6f39
452bbbb0e5
ee0de6f875
699489ebaf
af9da65f71
a72eaec3b8
f1206e76f2
af453548ee
df251c821c
2d28a95ff7
8c93ef5bc2
1c0fc1326d
179928378e
c605d75bb4
81e4e2b892
6f166433b5
030b64de62
c7eadb1154
62dc77369d
24587435ca
a3124ccea4
4e51e61d16
5c8390bbe0
da7968efef
7037555516
19ed990c57
d78bbfc290
2924da88bc
3024e9fba4
d3da4eb009
19637d612e
afc9a0096a
3d56262386
6673fcfd11
b5da3fa2b0
Cargo.lock (generated, 606 lines changed)
File diff suppressed because it is too large.
Cargo.toml (18 lines changed)

@@ -1,12 +1,12 @@
 [package]
 name = "nostr-rs-relay"
-version = "0.2.3"
+version = "0.5.1"
 edition = "2021"

 [dependencies]
 log = "^0.4"
 env_logger = "^0.9"
-tokio = { version = "^1.14", features = ["full"] }
+tokio = { version = "^1.16", features = ["full"] }
 futures = "^0.3"
 futures-util = "^0.3"
 tokio-tungstenite = "^0.16"
@@ -15,12 +15,18 @@ thiserror = "^1"
 uuid = { version = "^0.8", features = ["v4"] }
 config = { version = "0.11", features = ["toml"] }
 bitcoin_hashes = { version = "^0.9", features = ["serde"] }
-secp256k1 = {git = "https://github.com/rust-bitcoin/rust-secp256k1.git", rev = "50034ccb18fdd84904ab3aa6c84a12fcced33209", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] }
+secp256k1 = {version = "^0.21", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] }
 serde = { version = "^1.0", features = ["derive"] }
-serde_json = "^1.0"
+serde_json = {version = "^1.0", features = ["preserve_order"]}
 hex = "^0.4"
-rusqlite = "^0.26"
+rusqlite = { version = "^0.26", features = ["limits"]}
 r2d2 = "^0.8"
 r2d2_sqlite = "^0.19"
 lazy_static = "^1.4"
 governor = "^0.4"
 nonzero_ext = "^0.3"
-hyper={ version="0.14", features=["server","http1","http2","tcp"] }
+hyper = { version="0.14", features=["client", "server","http1","http2","tcp"] }
+hyper-tls = "^0.5"
+http = { version = "^0.2" }
+parse_duration = "^2"
+rand = "^0.8"
Dockerfile

@@ -1,4 +1,4 @@
-FROM rust:1.57 as builder
+FROM rust:1.58.1 as builder

 RUN USER=root cargo new --bin nostr-rs-relay
 WORKDIR ./nostr-rs-relay
@@ -12,11 +12,11 @@ COPY ./src ./src
 RUN rm ./target/release/deps/nostr*relay*
 RUN cargo build --release

-FROM debian:buster-slim
+FROM debian:bullseye-20220125-slim
 ARG APP=/usr/src/app
 ARG APP_DATA=/usr/src/app/db
 RUN apt-get update \
-    && apt-get install -y ca-certificates tzdata sqlite3 \
+    && apt-get install -y ca-certificates tzdata sqlite3 libc6 \
     && rm -rf /var/lib/apt/lists/*

 EXPOSE 8080
README.md (29 lines changed)

@@ -8,6 +8,20 @@ The project master repository is available on
 [sourcehut](https://sr.ht/~gheartsfield/nostr-rs-relay/), and is
 mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay).

+## Features
+
+NIPs with a relay-specific implementation are listed here.
+
+- [x] NIP-01: Core event model
+- [x] NIP-01: Hide old metadata events
+- [x] NIP-01: Id/Author prefix search (_experimental_)
+- [x] NIP-02: Hide old contact list events
+- [ ] NIP-03: OpenTimestamps
+- [x] NIP-05: Mapping Nostr keys to DNS identifiers
+- [ ] NIP-09: Event deletion
+- [x] NIP-11: Relay information document
+- [x] NIP-12: Generic tag search (_experimental_)
+
 ## Quick Start

 The provided `Dockerfile` will compile and build the server
@@ -39,9 +53,12 @@ Text Note [81cf...2652] from 296a...9b92 5 seconds ago
 hello world
 ```

+A pre-built container is also available on DockerHub:
+https://hub.docker.com/r/scsibug/nostr-rs-relay
+
 ## Configuration

-The sample `[config.toml](config.toml)` file demonstrates the
+The sample [`config.toml`](config.toml) file demonstrates the
 configuration available to the relay. This file is optional, but may
 be mounted into a docker container like so:

@@ -55,6 +72,16 @@ $ docker run -it -p 7000:8080 \
 Options include rate-limiting, event size limits, and network address
 settings.

+## Reverse Proxy Configuration
+
+For examples of putting the relay behind a reverse proxy (for TLS
+termination, load balancing, and other features), see [Reverse
+Proxy](reverse-proxy.md).
+
+## Dev Channel
+
+The current dev discussions for this project are happening at
+https://discord.gg/ufG6fH52Vk. Drop in with any development-related
+questions.
+
 License
 ---
 This project is MIT licensed.
config.toml (75 lines changed)

@@ -1,13 +1,39 @@
 # Nostr-rs-relay configuration

+[info]
+# The advertised URL for the Nostr websocket.
+relay_url = "wss://nostr.example.com/"
+
+# Relay information for clients. Put your unique server name here.
+name = "nostr-rs-relay"
+
+# Description
+description = "A newly created nostr-rs-relay.\n\nCustomize this with your own info."
+
+# Administrative contact pubkey
+#pubkey = "0c2d168a4ae8ca58c9f1ab237b5df682599c6c7ab74307ea8b05684b60405d41"
+
+# Administrative contact URI
+#contact = "mailto:contact@example.com"
+
 [database]
 # Directory for SQLite files. Defaults to the current directory. Can
 # also be specified (and overridden) with the "--db dirname" command
 # line option.
 data_directory = "."

+# Database connection pool settings for subscribers:
+
+# Minimum number of SQLite reader connections
+#min_conn = 4
+
+# Maximum number of SQLite reader connections
+#max_conn = 128
+
 [network]
 # Bind to this network address
 address = "0.0.0.0"

 # Listen on this port
 port = 8080

@@ -20,22 +46,57 @@ reject_future_seconds = 1800
 [limits]
 # Limit events created per second, averaged over one minute. Must be
 # an integer. If not set (or set to 0), defaults to unlimited.
-messages_per_sec = 0
+#messages_per_sec = 0

 # Limit the maximum size of an EVENT message. Defaults to 128 KB.
 # Set to 0 for unlimited.
-max_event_bytes = 131072
+#max_event_bytes = 131072

 # Maximum WebSocket message in bytes. Defaults to 128 KB.
-max_ws_message_bytes = 131072
+#max_ws_message_bytes = 131072

 # Maximum WebSocket frame size in bytes. Defaults to 128 KB.
-max_ws_frame_bytes = 131072
+#max_ws_frame_bytes = 131072

 # Broadcast buffer size, in number of events. This prevents slow
-# readers from consuming memory. Defaults to 4096.
-broadcast_buffer = 4096
+# readers from consuming memory.
+#broadcast_buffer = 16384

 # Event persistence buffer size, in number of events. This provides
 # backpressure to senders if writes are slow. Defaults to 16.
-event_persist_buffer = 16
+#event_persist_buffer = 16

 [authorization]
 # Pubkey addresses in this array are whitelisted for event publishing.
 # Only valid events by these authors will be accepted, if the variable
 # is set.
 #pubkey_whitelist = [
 #  "35d26e4690cbe1a898af61cc3515661eb5fa763b57bd0b42e45099c8b32fd50f",
 #  "887645fef0ce0c3c1218d2f5d8e6132a19304cdc57cd20281d082f38cfea0072",
 #]

+[verified_users]
+# NIP-05 verification of users. Can be "enabled" to require NIP-05
+# metadata for event authors, "passive" to perform validation but
+# never block publishing, or "disabled" to do nothing.
+#mode = "disabled"
+
+# Domain names that will be prevented from publishing events.
+#domain_blacklist = ["wellorder.net"]
+
+# Domain names that are allowed to publish events. If defined, only
+# events by NIP-05 verified authors at these domains are persisted.
+#domain_whitelist = ["example.com"]
+
+# Consider a pubkey "verified" if we have a successful validation
+# from the NIP-05 domain within this amount of time. Note, if the
+# domain provides a successful response that omits the account,
+# verification is immediately revoked.
+#verify_expiration = "1 week"
+
+# How long to wait between verification attempts for a specific author.
+#verify_update_frequency = "24 hours"
+
+# How many consecutive failed checks before we give up on verifying
+# this author.
+#max_consecutive_failures = 20
docs/user-verification-nip05.md (new file, 248 lines)

@@ -0,0 +1,248 @@
# Author Verification Design Document

The relay will use NIP-05 DNS-based author verification to limit which
authors can publish events to a relay. This document describes how
this feature will operate.

## Considerations

DNS-based author verification is designed to be deployed in relays that
want to prevent spam, so there should be strong protections to prevent
unauthorized authors from persisting data. This includes data needed to
verify new authors.

There should be protections in place to ensure the relay cannot be
used to spam or flood other webservers. Additionally, there should be
protections against server-side request forgery (SSRF).

## Design Overview

### Concepts

All authors are initially "unverified". Unverified authors that submit
appropriate `NIP-05` metadata events become "candidates" for
verification. A candidate author becomes verified when the relay
inspects a kind `0` metadata event for the author with a `nip05` field,
and follows the procedure in `NIP-05` to successfully associate the
author with an internet identifier.

The `NIP-05` procedure verifies an author for a fixed period of time,
configurable by the relay operator. If this "verification expiration
time" (`verify_expiration`) is exceeded without being refreshed, they
are once again unverified.

Verified authors have their status regularly and automatically updated
through scheduled polling to their verified domain; this process is
"re-verification". It is performed based on the configuration setting
`verify_update_frequency`, which defines how long the relay waits
between verification attempts (whether the result was success or
failure).

Authors may change their verification data (the internet identifier from
`NIP-05`) with a new metadata event, which then requires
re-verification. Their old verification remains valid until
expiration.

Performing candidate author verification is a best-effort activity and
may be significantly rate-limited to prevent relays being used to
attack other hosts. Candidate verification (untrusted authors) should
never impact re-verification (trusted authors).

## Operating Modes

The relay may operate in one of three modes. "Disabled" performs no
validation activities, and will never permit or deny events based on
an author's NIP-05 metadata. "Passive" performs NIP-05 validation,
but does not permit or deny events based on the validity or presence
of NIP-05 metadata. "Enabled" will require current and valid NIP-05
metadata for any events to be persisted. "Enabled" mode will
additionally consider domain whitelist/blacklist configuration data to
restrict which authors' events are persisted.

## Design Details

### Data Storage

Verification is stored in a dedicated table. This tracks:

* `nip05` identifier
* most recent verification timestamp
* most recent verification failure timestamp
* reference to the metadata event (used for tracking `created_at` and
  `pubkey`)

### Event Handling

All events are first validated to ensure the signature is valid.

Incoming events of kind _other_ than metadata (kind `0`) submitted by
clients will be evaluated as follows.

* If the event's author has a current verification, the event is
  persisted as normal.
* If the event's author has either no verification, or the
  verification is expired, the event is rejected.

If the event is a metadata event, we handle it differently.

We first determine the verification status of the event's pubkey;
a sketch of the resulting decision logic follows the list below.

* If the event author is unverified, AND the event contains a `nip05`
  key, we consider this a verification candidate.
* If the event author is unverified, AND the event does not contain a
  `nip05` key, this is not a candidate, and the event is dropped.
* If the event author is verified, AND the event contains a `nip05`
  key that is identical to the currently stored value, no special
  action is needed.
* If the event author is verified, AND the event contains a different
  `nip05` than was previously verified, with a more recent timestamp,
  we need to re-verify.
* If the event author is verified, AND the event is missing a `nip05`
  key, and the event timestamp is more recent than what was verified,
  we do nothing. The current verification will be allowed to expire.
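
Condensed into code, that decision table might look like the following
sketch; every name here is illustrative and not part of the relay's
actual API.

```rust
/// Outcome of handling an incoming metadata (kind 0) event, following
/// the rules above. Illustrative names only, not the relay's API.
enum MetadataAction {
    Candidate, // unverified author with a nip05: attempt verification
    Drop,      // unverified author without a nip05: reject
    NoOp,      // verified author, nothing relevant changed
    Reverify,  // verified author, newer event with a different nip05
    LetExpire, // verified author removed their nip05; let it lapse
}

fn classify(verified: bool, has_nip05: bool, nip05_changed: bool, newer: bool) -> MetadataAction {
    match (verified, has_nip05) {
        (false, true) => MetadataAction::Candidate,
        (false, false) => MetadataAction::Drop,
        (true, true) if nip05_changed && newer => MetadataAction::Reverify,
        (true, true) => MetadataAction::NoOp,
        (true, false) if newer => MetadataAction::LetExpire,
        (true, false) => MetadataAction::NoOp,
    }
}
```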

### Candidate Verification

When a candidate verification is requested, a rate limit will be
utilized. If the rate limit is exceeded, new candidate verification
requests will be dropped. In practice, this is implemented by a
size-limited channel that drops events that exceed a threshold.

Candidates are never persisted in the database.
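
A minimal sketch of that size-limited channel using tokio's bounded
mpsc; the capacity and payload type are assumptions for illustration.

```rust
use tokio::sync::mpsc::{self, error::TrySendError};

#[tokio::main]
async fn main() {
    // The capacity here is illustrative; it bounds how many candidate
    // verifications may be pending at once.
    let (tx, mut rx) = mpsc::channel::<String>(2);

    for candidate in ["alice@example.com", "bob@example.com", "carol@example.com"] {
        // try_send never blocks: when the buffer is full, the candidate
        // request is simply dropped, which acts as the rate limit.
        match tx.try_send(candidate.to_string()) {
            Ok(()) => println!("queued {candidate}"),
            Err(TrySendError::Full(_)) => println!("dropped {candidate} (rate limited)"),
            Err(TrySendError::Closed(_)) => break,
        }
    }
    drop(tx);
    while let Some(c) = rx.recv().await {
        println!("verifying {c}");
    }
}
```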

### Re-Verification

Re-verification is straightforward when there has been no change to
the `nip05` key. A new request to the `nip05` domain is performed,
and if successful, the verification timestamp is updated to the
current time. If the request fails due to a timeout or server error,
the failure timestamp is updated instead.

When the `nip05` key has changed and this event is more recent, we
will create a new verification record, and delete all other records
for the same name.

Regarding creating new records vs. updating: we never update the event
reference or `nip05` identifier in a verification record. Every update
either resets the last failure or the last success timestamp.

### Determining Verification Status

In determining if an event is from a verified author, the following
procedure should be used:

Join the verification table with the event table, to provide
verification data alongside the event `created_at` and `pubkey`
metadata. Find the most recent verification record for the author,
based on the `created_at` time.

Reject the record if the success timestamp is not within our
configured expiration time.

Reject records with disallowed domains, based on any whitelists or
blacklists in effect.

If a result remains, the author is treated as verified.

This does give a time window for authors transitioning their verified
status between domains. There may be a period of time in which there
are multiple valid rows in the verification table for a given author.
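
A sketch of that join, in the style db.rs uses for embedded SQL. The
`user_verification` table and its column names are assumptions here,
since this diff only shows the `event` table schema.

```rust
// Most recent verification record for an author, newest metadata event
// first; expiration and domain checks are applied to the returned row.
// Table and column names are assumed for illustration.
const LATEST_VERIFICATION_SQL: &str = r##"
SELECT v.name, v.verified_at, v.failed_at, e.created_at
  FROM user_verification v
 INNER JOIN event e ON e.id = v.metadata_event
 WHERE e.author = ?
 ORDER BY e.created_at DESC, v.verified_at DESC, v.failed_at DESC
 LIMIT 1;
"##;
```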

### Cleaning Up Inactive Verifications

After an author verification has expired, we will continue to check
for it to become valid again. After a configurable number of attempts,
we should simply forget it, and reclaim the space.

### Addition of Domain Whitelist/Blacklist

A set of whitelisted or blacklisted domains may be provided. If both
are provided, only the whitelist is used. In this context, domains
are either "allowed" (present on a whitelist and NOT present on a
blacklist), or "denied" (NOT present on a whitelist and present on a
blacklist).

The processes outlined so far are modified in the presence of these
options (a sketch of the allow/deny rule follows the list):

* Only authors with allowed domains can become candidates for
  verification.
* Verification status queries additionally filter out any denied
  domains.
* Re-verification processes only proceed with allowed domains.
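
A compact sketch of the allowed/denied rule above; the function and
parameter names are illustrative.

```rust
/// True when a domain is "allowed": if a whitelist exists it alone
/// decides; otherwise a blacklist may deny; with neither configured,
/// every domain is allowed.
fn domain_allowed(
    domain: &str,
    whitelist: Option<&[String]>,
    blacklist: Option<&[String]>,
) -> bool {
    if let Some(wl) = whitelist {
        return wl.iter().any(|d| d == domain);
    }
    if let Some(bl) = blacklist {
        return !bl.iter().any(|d| d == domain);
    }
    true
}
```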

### Integration

We have an existing database writer thread, which receives events and
attempts to persist them to disk. Once validated and persisted, these
events are broadcast to all subscribers.

When verification is enabled, the writer must check to ensure a valid,
unexpired verification record exists for the author. All metadata
events (regardless of verification status) are forwarded to a verifier
module. If the verifier determines a new verification record is
needed, it is also responsible for persisting and broadcasting the
event, just as the database writer would have done.

## Threat Scenarios

Some of these mitigations are fully implemented; others are documented
simply to demonstrate that a mitigation is possible.

### Domain Spamming

*Threat*: An author with a high volume of events creates a metadata
event with a bogus domain, causing the relay to generate significant
unwanted traffic to a target.

*Mitigation*: Rate limiting for all candidate verification will limit
external requests to a reasonable amount. Currently, this is a simple
delay that slows down the HTTP task.

### Denial of Service for Legitimate Authors

*Threat*: An author with a high volume of events creates a metadata
event with a domain that is invalid for them, _but which is used by
other legitimate authors_. This triggers rate-limiting against the
legitimate domain, and blocks authors from updating their own metadata.

*Mitigation*: Rate limiting should only apply to candidates, so any
existing verified authors have priority for re-verification. New
authors will be affected, as we cannot distinguish between the threat
and a legitimate author. _(Unimplemented)_

### Denial of Service by Consuming Storage

*Threat*: An author creates a high volume of random metadata events
with unique domains, in order to cause us to store large amounts of
data for to-be-verified authors.

*Mitigation*: No data is stored for candidate authors. This makes it
harder for new authors to become verified, but is effective at
preventing this attack.

### Metadata Replay for Verified Author

*Threat*: An attacker replays an out-of-date metadata event for an
author, to cause a verification to fail.

*Mitigation*: New metadata events have their signed timestamp compared
against the signed timestamp of the event that has most recently
verified them. If the metadata event is older, it is discarded.

### Server-Side Request Forgery via Metadata

*Threat*: An attacker includes malicious data in the `nip05` event,
which is used to generate HTTP requests against potentially internal
resources, either leaking data or invoking webservices beyond their
own privileges.

*Mitigation*: Consider detecting and dropping when the `nip05` field
is an IP address. Allow the relay operator to utilize the `blacklist`
or `whitelist` to constrain hosts that will be contacted. Most
importantly, the verification process is hardcoded to only make
requests to a known URL path
(`.well-known/nostr.json?name=<LOCAL_NAME>`). The `<LOCAL_NAME>`
component is restricted to a basic ASCII subset (preventing additional
URL components).
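
A sketch of building that fixed-path request URL; the exact character
set accepted for `<LOCAL_NAME>` is an assumption beyond "basic ASCII
subset".

```rust
/// Build the only URL shape the verifier will ever request. Rejecting
/// names outside a conservative ASCII set keeps metadata from smuggling
/// extra path or query components into the request.
fn nip05_url(domain: &str, local_name: &str) -> Option<String> {
    let name_ok = !local_name.is_empty()
        && local_name
            .chars()
            .all(|c| c.is_ascii_alphanumeric() || matches!(c, '-' | '_' | '.'));
    if !name_ok {
        return None;
    }
    Some(format!(
        "https://{}/.well-known/nostr.json?name={}",
        domain, local_name
    ))
}
```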

reverse-proxy.md (new file, 53 lines)

@@ -0,0 +1,53 @@
# Reverse Proxy Setup Guide

It is recommended to run `nostr-rs-relay` behind a reverse proxy such
as `haproxy` or `nginx` to provide TLS termination. A simple example
of an `haproxy` configuration is documented here.

## Minimal HAProxy Configuration

Assumptions:

* HAProxy version is `2.4.10` or greater (older versions not tested).
* Hostname for the relay is `relay.example.com`.
* Your relay should be available over wss://relay.example.com
* Your (NIP-11) relay info page should be available on https://relay.example.com
* SSL certificate is located in `/etc/certs/example.com.pem`.
* Relay is running on port 8080.
* Limit connections to 400 concurrent.
* HSTS (HTTP Strict Transport Security) is desired.
* Only TLS 1.2 or greater is allowed.

```
global
    ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
    ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets

frontend fe_prod
    mode http
    bind :443 ssl crt /etc/certs/example.com.pem alpn h2,http/1.1
    bind :80
    http-request set-header X-Forwarded-Proto https if { ssl_fc }
    redirect scheme https code 301 if !{ ssl_fc }
    acl host_relay hdr(host) -i relay.example.com
    use_backend relay if host_relay
    # HSTS (1 year)
    http-response set-header Strict-Transport-Security max-age=31536000

backend relay
    mode http
    timeout connect 5s
    timeout client 50s
    timeout server 50s
    timeout tunnel 1h
    timeout client-fin 30s
    option tcp-check
    default-server maxconn 400 check inter 20s fastinter 1s
    server relay 127.0.0.1:8080
```

### Notes

You may experience WebSocket connection problems with Firefox if
HTTP/2 is enabled, for older versions of HAProxy (2.3.x). Either
disable HTTP/2 (`h2`), or upgrade HAProxy.
src/close.rs

@@ -1,4 +1,6 @@
 //! Subscription close request parsing
+//!
+//! Representation and parsing of `CLOSE` messages sent from clients.
 use crate::error::{Error, Result};
 use serde::{Deserialize, Serialize};

@@ -11,7 +13,7 @@ pub struct CloseCmd {
     id: String,
 }

-/// Close command parsed
+/// Identifier of the subscription to be closed.
 #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
 pub struct Close {
     /// The subscription identifier being closed.
src/config.rs (118 lines changed)

@@ -1,17 +1,31 @@
 //! Configuration file and settings management
+use lazy_static::lazy_static;
 use log::*;
 use serde::{Deserialize, Serialize};
+use std::sync::RwLock;
+use std::time::Duration;

+// initialize a singleton default configuration
+lazy_static! {
+    pub static ref SETTINGS: RwLock<Settings> = RwLock::new(Settings::default());
+}
+
 #[derive(Debug, Serialize, Deserialize, Clone)]
 #[allow(unused)]
 pub struct Info {
     pub relay_url: Option<String>,
     pub name: Option<String>,
     pub description: Option<String>,
     pub pubkey: Option<String>,
     pub contact: Option<String>,
 }

 #[derive(Debug, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Database {
     pub data_directory: String,
+    pub min_conn: u32,
+    pub max_conn: u32,
 }

 #[derive(Debug, Serialize, Deserialize)]
@@ -49,12 +63,75 @@ pub struct Limits {
     pub event_persist_buffer: usize, // events to buffer for database commits (block senders if database writes are too slow)
 }

 #[derive(Debug, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Authorization {
     pub pubkey_whitelist: Option<Vec<String>>, // If present, only allow these pubkeys to publish events
 }

+#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
+#[serde(rename_all = "lowercase")]
+pub enum VerifiedUsersMode {
+    Enabled,
+    Passive,
+    Disabled,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct VerifiedUsers {
+    pub mode: VerifiedUsersMode, // Mode of operation: "enabled" (enforce) or "passive" (check only). If none, this is simply disabled.
+    pub domain_whitelist: Option<Vec<String>>, // If present, only verified users from these domains can publish events
+    pub domain_blacklist: Option<Vec<String>>, // If present, allow all verified users from any domain except these
+    pub verify_expiration: Option<String>, // how long a verification is cached for before no longer being used
+    pub verify_update_frequency: Option<String>, // how often to attempt to update verification
+    pub verify_expiration_duration: Option<Duration>, // internal result of parsing verify_expiration
+    pub verify_update_frequency_duration: Option<Duration>, // internal result of parsing verify_update_frequency
+    pub max_consecutive_failures: usize, // maximum number of verification failures in a row, before ceasing future checks
+}
+
+impl VerifiedUsers {
+    pub fn init(&mut self) {
+        self.verify_expiration_duration = self.verify_expiration_duration();
+        self.verify_update_frequency_duration = self.verify_update_duration();
+    }
+
+    pub fn is_enabled(&self) -> bool {
+        self.mode == VerifiedUsersMode::Enabled
+    }
+
+    pub fn is_active(&self) -> bool {
+        self.mode == VerifiedUsersMode::Enabled || self.mode == VerifiedUsersMode::Passive
+    }
+
+    pub fn is_passive(&self) -> bool {
+        self.mode == VerifiedUsersMode::Passive
+    }
+
+    pub fn verify_expiration_duration(&self) -> Option<Duration> {
+        self.verify_expiration
+            .as_ref()
+            .and_then(|x| parse_duration::parse(x).ok())
+    }
+
+    pub fn verify_update_duration(&self) -> Option<Duration> {
+        self.verify_update_frequency
+            .as_ref()
+            .and_then(|x| parse_duration::parse(x).ok())
+    }
+
+    pub fn is_valid(&self) -> bool {
+        self.verify_expiration_duration().is_some() && self.verify_update_duration().is_some()
+    }
+}
+
 #[derive(Debug, Serialize, Deserialize)]
 #[allow(unused)]
 pub struct Settings {
     pub info: Info,
     pub database: Database,
     pub network: Network,
     pub limits: Limits,
     pub authorization: Authorization,
+    pub verified_users: VerifiedUsers,
     pub retention: Retention,
     pub options: Options,
 }

@@ -76,12 +153,25 @@ impl Settings {

     fn new_from_default(default: &Settings) -> Result<Self, config::ConfigError> {
         let config: config::Config = config::Config::new();
-        let settings: Settings = config
+        let mut settings: Settings = config
             // use defaults
             .with_merged(config::Config::try_from(default).unwrap())?
             // override with file contents
             .with_merged(config::File::with_name("config"))?
             .try_into()?;
+        // ensure connection pool size is logical
+        if settings.database.min_conn > settings.database.max_conn {
+            panic!(
+                "Database min_conn setting ({}) cannot exceed max_conn ({})",
+                settings.database.min_conn, settings.database.max_conn
+            );
+        }
+        // ensure durations parse
+        if !settings.verified_users.is_valid() {
+            panic!("VerifiedUsers time settings could not be parsed");
+        }
+        // initialize durations for verified users
+        settings.verified_users.init();
         Ok(settings)
     }
 }

@@ -89,8 +179,17 @@ impl Settings {
 impl Default for Settings {
     fn default() -> Self {
         Settings {
+            info: Info {
+                relay_url: None,
+                name: Some("Unnamed nostr-rs-relay".to_owned()),
+                description: None,
+                pubkey: None,
+                contact: None,
+            },
             database: Database {
                 data_directory: ".".to_owned(),
+                min_conn: 4,
+                max_conn: 128,
             },
             network: Network {
                 port: 8080,
@@ -101,8 +200,21 @@ impl Default for Settings {
                 max_event_bytes: Some(2 << 17), // 128K
                 max_ws_message_bytes: Some(2 << 17), // 128K
                 max_ws_frame_bytes: Some(2 << 17), // 128K
-                broadcast_buffer: 4096,
-                event_persist_buffer: 16,
+                broadcast_buffer: 16384,
+                event_persist_buffer: 4096,
             },
             authorization: Authorization {
                 pubkey_whitelist: None, // Allow any address to publish
             },
+            verified_users: VerifiedUsers {
+                mode: VerifiedUsersMode::Disabled,
+                domain_whitelist: None,
+                domain_blacklist: None,
+                verify_expiration: Some("1 week".to_owned()),
+                verify_update_frequency: Some("1 day".to_owned()),
+                verify_expiration_duration: None,
+                verify_update_frequency_duration: None,
+                max_consecutive_failures: 20,
+            },
             retention: Retention {
                 max_events: None, // max events
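
Since `verify_expiration` and `verify_update_frequency` arrive as
free-form strings, a quick look at how the `parse_duration` crate
(declared in Cargo.toml above) turns them into `Duration` values may
help; the specific inputs are illustrative.

```rust
fn main() {
    // parse_duration accepts human-readable spans like those in
    // config.toml; a parse failure surfaces as None via .ok(), which is
    // how is_valid() can reject unparseable settings at startup.
    let exp = parse_duration::parse("1 week").ok();
    let freq = parse_duration::parse("24 hours").ok();
    assert_eq!(exp.map(|d| d.as_secs()), Some(7 * 24 * 3600));
    assert_eq!(freq.map(|d| d.as_secs()), Some(24 * 3600));
}
```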
src/db.rs (540 lines changed)

@@ -1,140 +1,122 @@
 //! Event persistence and querying
+use crate::config::SETTINGS;
+use crate::error::Error;
 use crate::error::Result;
 use crate::event::Event;
+use crate::hexrange::hex_range;
+use crate::hexrange::HexSearch;
+use crate::nip05;
+use crate::schema::{upgrade_db, STARTUP_SQL};
 use crate::subscription::Subscription;
+use crate::utils::is_hex;
 use governor::clock::Clock;
 use governor::{Quota, RateLimiter};
 use hex;
 use log::*;
 use r2d2;
 use r2d2_sqlite::SqliteConnectionManager;
 use rusqlite::params;
+use rusqlite::types::ToSql;
 use rusqlite::Connection;
 use rusqlite::OpenFlags;
 //use std::num::NonZeroU32;
-use crate::config::SETTINGS;
 use std::path::Path;
 use std::thread;
 use std::time::Duration;
 use std::time::Instant;
 use tokio::task;

 pub type SqlitePool = r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>;
+pub type PooledConnection = r2d2::PooledConnection<r2d2_sqlite::SqliteConnectionManager>;
+
+/// Events submitted from a client, with a return channel for notices
+pub struct SubmittedEvent {
+    pub event: Event,
+    pub notice_tx: tokio::sync::mpsc::Sender<String>,
+}

 /// Database file
-const DB_FILE: &str = "nostr.db";
+pub const DB_FILE: &str = "nostr.db";

-/// Startup DB Pragmas
-const STARTUP_SQL: &str = r##"
-PRAGMA main.synchronous=NORMAL;
-PRAGMA foreign_keys = ON;
-pragma mmap_size = 536870912; -- 512MB of mmap
-"##;
+/// Build a database connection pool.
+pub fn build_pool(
+    name: &str,
+    flags: OpenFlags,
+    min_size: u32,
+    max_size: u32,
+    wait_for_db: bool,
+) -> SqlitePool {
+    let settings = SETTINGS.read().unwrap();

-/// Schema definition
-const INIT_SQL: &str = r##"
--- Database settings
-PRAGMA encoding = "UTF-8";
-PRAGMA journal_mode=WAL;
-PRAGMA main.synchronous=NORMAL;
-PRAGMA foreign_keys = ON;
-PRAGMA application_id = 1654008667;
-PRAGMA user_version = 2;
-
--- Event Table
-CREATE TABLE IF NOT EXISTS event (
-id INTEGER PRIMARY KEY,
-event_hash BLOB NOT NULL, -- 4-byte hash
-first_seen INTEGER NOT NULL, -- when the event was first seen (not authored!) (seconds since 1970)
-created_at INTEGER NOT NULL, -- when the event was authored
-author BLOB NOT NULL, -- author pubkey
-kind INTEGER NOT NULL, -- event kind
-hidden INTEGER, -- relevant for queries
-content TEXT NOT NULL -- serialized json of event object
-);
-
--- Event Indexes
-CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash);
-CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
-CREATE INDEX IF NOT EXISTS author_index ON event(author);
-CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
-
--- Event References Table
-CREATE TABLE IF NOT EXISTS event_ref (
-id INTEGER PRIMARY KEY,
-event_id INTEGER NOT NULL, -- an event ID that contains an #e tag.
-referenced_event BLOB NOT NULL, -- the event that is referenced.
-FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
-);
-
--- Event References Index
-CREATE INDEX IF NOT EXISTS event_ref_index ON event_ref(referenced_event);
-
--- Pubkey References Table
-CREATE TABLE IF NOT EXISTS pubkey_ref (
-id INTEGER PRIMARY KEY,
-event_id INTEGER NOT NULL, -- an event ID that contains an #p tag.
-referenced_pubkey BLOB NOT NULL, -- the pubkey that is referenced.
-FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE RESTRICT ON DELETE CASCADE
-);
-
--- Pubkey References Index
-CREATE INDEX IF NOT EXISTS pubkey_ref_index ON pubkey_ref(referenced_pubkey);
-"##;
-
-/// Upgrade DB to latest version, and execute pragma settings
-pub fn upgrade_db(conn: &mut Connection) -> Result<()> {
-    // check the version.
-    let curr_version = db_version(conn)?;
-    info!("DB version = {:?}", curr_version);
-
-    // initialize from scratch
-    if curr_version == 0 {
-        match conn.execute_batch(INIT_SQL) {
-            Ok(()) => info!("database pragma/schema initialized to v2, and ready"),
-            Err(err) => {
-                error!("update failed: {}", err);
-                panic!("database could not be initialized");
-            }
-        }
-    } else if curr_version == 1 {
-        // only change is adding a hidden column to events.
-        let upgrade_sql = r##"
-ALTER TABLE event ADD hidden INTEGER;
-UPDATE event SET hidden=FALSE;
-PRAGMA user_version = 2;
-"##;
-        match conn.execute_batch(upgrade_sql) {
-            Ok(()) => info!("database schema upgraded v1 -> v2"),
-            Err(err) => {
-                error!("update failed: {}", err);
-                panic!("database could not be upgraded");
-            }
-        }
-    } else if curr_version == 2 {
-        debug!("Database version was already current");
-    } else if curr_version > 2 {
-        panic!("Database version is newer than supported by this executable");
+    let db_dir = &settings.database.data_directory;
+    let full_path = Path::new(db_dir).join(DB_FILE);
+    // small hack; if the database doesn't exist yet, that means the
+    // writer thread hasn't finished. Give it a chance to work. This
+    // is only an issue with the first time we run.
+    while !full_path.exists() && wait_for_db {
+        debug!("Database reader pool is waiting on the database to be created...");
+        thread::sleep(Duration::from_millis(500));
     }
-    // Setup PRAGMA
-    conn.execute_batch(STARTUP_SQL)?;
-    Ok(())
+    let manager = SqliteConnectionManager::file(&full_path)
+        .with_flags(flags)
+        .with_init(|c| c.execute_batch(STARTUP_SQL));
+    let pool: SqlitePool = r2d2::Pool::builder()
+        .test_on_check_out(true) // no noticeable performance hit
+        .min_idle(Some(min_size))
+        .max_size(max_size)
+        .build(manager)
+        .unwrap();
+    info!(
+        "Built a connection pool {:?} (min={}, max={})",
+        name, min_size, max_size
+    );
+    pool
+}
+
+/// Build a single database connection, with provided flags
+pub fn build_conn(flags: OpenFlags) -> Result<Connection> {
+    let settings = SETTINGS.read().unwrap();
+    let db_dir = &settings.database.data_directory;
+    let full_path = Path::new(db_dir).join(DB_FILE);
+    // create a connection
+    Ok(Connection::open_with_flags(&full_path, flags)?)
 }

 /// Spawn a database writer that persists events to the SQLite store.
 pub async fn db_writer(
-    mut event_rx: tokio::sync::mpsc::Receiver<Event>,
+    mut event_rx: tokio::sync::mpsc::Receiver<SubmittedEvent>,
     bcast_tx: tokio::sync::broadcast::Sender<Event>,
+    metadata_tx: tokio::sync::broadcast::Sender<Event>,
     mut shutdown: tokio::sync::broadcast::Receiver<()>,
 ) -> tokio::task::JoinHandle<Result<()>> {
+    let settings = SETTINGS.read().unwrap();
+
+    // are we performing NIP-05 checking?
+    let nip05_active = settings.verified_users.is_active();
+    // are we requiring NIP-05 user verification?
+    let nip05_enabled = settings.verified_users.is_enabled();
+
     task::spawn_blocking(move || {
-        // get database configuration settings
-        let config = SETTINGS.read().unwrap();
-        let db_dir = &config.database.data_directory;
+        let settings = SETTINGS.read().unwrap();
+        let db_dir = &settings.database.data_directory;
         let full_path = Path::new(db_dir).join(DB_FILE);
-        // create a connection
-        let mut conn = Connection::open_with_flags(
-            &full_path,
+        // create a connection pool
+        let pool = build_pool(
+            "event writer",
             OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
-        )?;
+            1,
+            4,
+            false,
+        );
         info!("opened database {:?} for writing", full_path);
-        upgrade_db(&mut conn)?;
+        upgrade_db(&mut pool.get()?)?;

         // Make a copy of the whitelist
         let whitelist = &settings.authorization.pubkey_whitelist.clone();

         // get rate limit settings
-        let rps_setting = config.limits.messages_per_sec;
+        let rps_setting = settings.limits.messages_per_sec;
+        let mut most_recent_rate_limit = Instant::now();
         let mut lim_opt = None;
         let clock = governor::clock::QuantaClock::default();
         if let Some(rps) = rps_setting {
@@ -156,47 +138,132 @@ pub async fn db_writer(
                 break;
             }
             let mut event_write = false;
-            let event = next_event.unwrap();
-            match write_event(&mut conn, &event) {
+            let subm_event = next_event.unwrap();
+            let event = subm_event.event;
+            let notice_tx = subm_event.notice_tx;
+            // check if this event is authorized.
+            if let Some(allowed_addrs) = whitelist {
+                // if the event address is not in allowed_addrs.
+                if !allowed_addrs.contains(&event.pubkey) {
+                    info!(
+                        "Rejecting event {}, unauthorized author",
+                        event.get_event_id_prefix()
+                    );
+                    notice_tx
+                        .try_send("pubkey is not allowed to publish to this relay".to_owned())
+                        .ok();
+                    continue;
+                }
+            }
+
+            // send any metadata events to the NIP-05 verifier
+            if nip05_active && event.is_kind_metadata() {
+                // we are sending this prior to even deciding if we
+                // persist it. this allows the nip05 module to
+                // inspect it, update if necessary, or persist a new
+                // event and broadcast it itself.
+                metadata_tx.send(event.clone()).ok();
+            }
+
+            // check for NIP-05 verification
+            if nip05_enabled {
+                match nip05::query_latest_user_verification(pool.get()?, event.pubkey.to_owned()) {
+                    Ok(uv) => {
+                        if uv.is_valid() {
+                            info!(
+                                "new event from verified author ({:?},{:?})",
+                                uv.name.to_string(),
+                                event.get_author_prefix()
+                            );
+                        } else {
+                            info!("rejecting event, author ({:?} / {:?}) verification invalid (expired/wrong domain)",
+                                uv.name.to_string(),
+                                event.get_author_prefix()
+                            );
+                            notice_tx
+                                .try_send(
+                                    "NIP-05 verification is no longer valid (expired/wrong domain)"
+                                        .to_owned(),
+                                )
+                                .ok();
+                            continue;
+                        }
+                    }
+                    Err(Error::SqlError(rusqlite::Error::QueryReturnedNoRows)) => {
+                        debug!(
+                            "no verification records found for pubkey: {:?}",
+                            event.get_author_prefix()
+                        );
+                        notice_tx
+                            .try_send("NIP-05 verification needed to publish events".to_owned())
+                            .ok();
+                        continue;
+                    }
+                    Err(e) => {
+                        warn!("checking nip05 verification status failed: {:?}", e);
+                        continue;
+                    }
+                }
+            }
+            // TODO: cache recent list of authors to remove a DB call.
+            let start = Instant::now();
+            match write_event(&mut pool.get()?, &event) {
                 Ok(updated) => {
                     if updated == 0 {
-                        debug!("ignoring duplicate event");
+                        trace!("ignoring duplicate event");
                     } else {
-                        info!("persisted event: {}", event.get_event_id_prefix());
+                        info!(
+                            "persisted event {:?} from {:?} in {:?}",
+                            event.get_event_id_prefix(),
+                            event.get_author_prefix(),
+                            start.elapsed()
+                        );
                         event_write = true;
                         // send this out to all clients
                         bcast_tx.send(event.clone()).ok();
                     }
                 }
                 Err(err) => {
-                    warn!("event insert failed: {}", err);
+                    warn!("event insert failed: {:?}", err);
+                    notice_tx
+                        .try_send(
+                            "relay experienced an error trying to publish the latest event"
+                                .to_owned(),
+                        )
+                        .ok();
                 }
             }

             // use rate limit, if defined, and if an event was actually written.
             if event_write {
                 if let Some(ref lim) = lim_opt {
                     if let Err(n) = lim.check() {
-                        info!("Rate limiting event creation");
-                        thread::sleep(n.wait_time_from(clock.now()));
+                        let wait_for = n.wait_time_from(clock.now());
+                        // check if we have recently logged rate
+                        // limits, but print out a message only once
+                        // per second.
+                        if most_recent_rate_limit.elapsed().as_secs() > 10 {
+                            warn!(
+                                "rate limit reached for event creation (sleep for {:?}) (suppressing future messages for 10 seconds)",
+                                wait_for
+                            );
+                            // reset last rate limit message
+                            most_recent_rate_limit = Instant::now();
+                        }
+                        // block event writes, allowing them to queue up
+                        thread::sleep(wait_for);
                         continue;
                     }
                 }
             }
         }
-        conn.close().ok();
         info!("database connection closed");
         Ok(())
     })
 }

 pub fn db_version(conn: &mut Connection) -> Result<usize> {
     let query = "PRAGMA user_version;";
     let curr_version = conn.query_row(query, [], |row| row.get(0))?;
     Ok(curr_version)
 }

-/// Persist an event to the database.
-pub fn write_event(conn: &mut Connection, e: &Event) -> Result<usize> {
+/// Persist an event to the database, returning rows added.
+pub fn write_event(conn: &mut PooledConnection, e: &Event) -> Result<usize> {
     // start transaction
     let tx = conn.transaction()?;
     // get relevant fields from event and convert to blobs.
@@ -215,24 +282,24 @@ pub fn write_event(conn: &mut Connection, e: &Event) -> Result<usize> {
     }
     // remember primary key of the event most recently inserted.
     let ev_id = tx.last_insert_rowid();
-    // add all event tags into the event_ref table
-    let etags = e.get_event_tags();
-    if !etags.is_empty() {
-        for etag in etags.iter() {
-            tx.execute(
-                "INSERT OR IGNORE INTO event_ref (event_id, referenced_event) VALUES (?1, ?2)",
-                params![ev_id, hex::decode(&etag).ok()],
-            )?;
-        }
-    }
-    // add all event tags into the pubkey_ref table
-    let ptags = e.get_pubkey_tags();
-    if !ptags.is_empty() {
-        for ptag in ptags.iter() {
-            tx.execute(
-                "INSERT OR IGNORE INTO pubkey_ref (event_id, referenced_pubkey) VALUES (?1, ?2)",
-                params![ev_id, hex::decode(&ptag).ok()],
-            )?;
+    // add all tags to the tag table
+    for tag in e.tags.iter() {
+        // ensure we have 2 values.
+        if tag.len() >= 2 {
+            let tagname = &tag[0];
+            let tagval = &tag[1];
+            // if tagvalue is hex;
+            if is_hex(tagval) {
+                tx.execute(
+                    "INSERT OR IGNORE INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3)",
+                    params![ev_id, &tagname, hex::decode(&tagval).ok()],
+                )?;
+            } else {
+                tx.execute(
+                    "INSERT OR IGNORE INTO tag (event_id, name, value) VALUES (?1, ?2, ?3)",
+                    params![ev_id, &tagname, &tagval],
+                )?;
+            }
         }
     }
     // if this event is for a metadata update, hide every other kind=0
@@ -243,7 +310,11 @@ pub fn write_event(conn: &mut Connection, e: &Event) -> Result<usize> {
             params![ev_id, hex::decode(&e.pubkey).ok(), e.created_at],
         )?;
         if update_count > 0 {
-            info!("hid {} older metadata events", update_count);
+            info!(
+                "hid {} older metadata events for author {:?}",
+                update_count,
+                e.get_author_prefix()
+            );
         }
     }
     // if this event is for a contact update, hide every other kind=3
@@ -254,14 +325,18 @@ pub fn write_event(conn: &mut Connection, e: &Event) -> Result<usize> {
             params![ev_id, hex::decode(&e.pubkey).ok(), e.created_at],
         )?;
         if update_count > 0 {
-            info!("hid {} older contact events", update_count);
+            info!(
+                "hid {} older contact events for author {:?}",
+                update_count,
+                e.get_author_prefix()
+            );
         }
     }
     tx.commit()?;
     Ok(ins_count)
 }

-/// Event resulting from a specific subscription request
+/// Serialized event associated with a specific subscription request.
 #[derive(PartialEq, Debug, Clone)]
 pub struct QueryResult {
     /// Subscription identifier
@@ -270,65 +345,119 @@ pub struct QueryResult {
     pub event: String,
 }

-/// Check if a string contains only hex characters.
-fn is_hex(s: &str) -> bool {
-    s.chars().all(|x| char::is_ascii_hexdigit(&x))
+/// Produce an arbitrary list of '?' parameters.
+fn repeat_vars(count: usize) -> String {
+    if count == 0 {
+        return "".to_owned();
+    }
+    let mut s = "?,".repeat(count);
+    // Remove trailing comma
+    s.pop();
+    s
 }

-/// Create a dynamic SQL query string from a subscription.
-fn query_from_sub(sub: &Subscription) -> String {
+/// Create a dynamic SQL query string and params from a subscription.
+fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
     // build a dynamic SQL query. all user-input is either an integer
     // (sqli-safe), or a string that is filtered to only contain
-    // hexadecimal characters.
+    // hexadecimal characters. Strings that require escaping (tag
+    // names/values) use parameters.
     let mut query =
-        "SELECT DISTINCT(e.content) FROM event e LEFT JOIN event_ref er ON e.id=er.event_id LEFT JOIN pubkey_ref pr ON e.id=pr.event_id "
-            .to_owned();
+        "SELECT DISTINCT(e.content) FROM event e LEFT JOIN tag t ON e.id=t.event_id ".to_owned();
+    // parameters
+    let mut params: Vec<Box<dyn ToSql>> = vec![];

     // for every filter in the subscription, generate a where clause
     let mut filter_clauses: Vec<String> = Vec::new();
     for f in sub.filters.iter() {
         // individual filter components
         let mut filter_components: Vec<String> = Vec::new();
-        // Query for "authors"
-        if f.authors.is_some() {
-            let authors_escaped: Vec<String> = f
-                .authors
-                .as_ref()
-                .unwrap()
-                .iter()
-                .filter(|&x| is_hex(x))
-                .map(|x| format!("x'{}'", x))
-                .collect();
-            let authors_clause = format!("author IN ({})", authors_escaped.join(", "));
+        // Query for "authors", allowing prefix matches
+        if let Some(authvec) = &f.authors {
+            // take each author and convert to a hexsearch
+            let mut auth_searches: Vec<String> = vec![];
+            for auth in authvec {
+                match hex_range(auth) {
+                    Some(HexSearch::Exact(ex)) => {
+                        auth_searches.push("author=?".to_owned());
+                        params.push(Box::new(ex));
+                    }
+                    Some(HexSearch::Range(lower, upper)) => {
+                        auth_searches.push("(author>? AND author<?)".to_owned());
+                        params.push(Box::new(lower));
+                        params.push(Box::new(upper));
+                    }
+                    Some(HexSearch::LowerOnly(lower)) => {
+                        auth_searches.push("author>?".to_owned());
+                        params.push(Box::new(lower));
+                    }
+                    None => {
+                        info!("Could not parse hex range from author {:?}", auth);
+                    }
+                }
+            }
+            let authors_clause = format!("({})", auth_searches.join(" OR "));
             filter_components.push(authors_clause);
         }
         // Query for Kind
-        if f.kind.is_some() {
+        if let Some(ks) = &f.kinds {
             // kind is number, no escaping needed
-            let kind_clause = format!("kind = {}", f.kind.unwrap());
+            let str_kinds: Vec<String> = ks.iter().map(|x| x.to_string()).collect();
+            let kind_clause = format!("kind IN ({})", str_kinds.join(", "));
             filter_components.push(kind_clause);
         }
-        // Query for event
-        if f.id.is_some() {
-            let id_str = f.id.as_ref().unwrap();
-            if is_hex(id_str) {
-                let id_clause = format!("event_hash = x'{}'", id_str);
-                filter_components.push(id_clause);
+        // Query for event, allowing prefix matches
+        if let Some(idvec) = &f.ids {
+            // take each id and convert to a hexsearch
+            let mut id_searches: Vec<String> = vec![];
+            for id in idvec {
+                match hex_range(id) {
+                    Some(HexSearch::Exact(ex)) => {
+                        id_searches.push("event_hash=?".to_owned());
+                        params.push(Box::new(ex));
+                    }
+                    Some(HexSearch::Range(lower, upper)) => {
+                        id_searches.push("(event_hash>? AND event_hash<?)".to_owned());
+                        params.push(Box::new(lower));
+                        params.push(Box::new(upper));
+                    }
+                    Some(HexSearch::LowerOnly(lower)) => {
+                        id_searches.push("event_hash>?".to_owned());
+                        params.push(Box::new(lower));
+                    }
+                    None => {
+                        info!("Could not parse hex range from id {:?}", id);
+                    }
+                }
             }
+            let id_clause = format!("({})", id_searches.join(" OR "));
+            filter_components.push(id_clause);
         }
-        // Query for referenced event
-        if f.event.is_some() {
-            let ev_str = f.event.as_ref().unwrap();
-            if is_hex(ev_str) {
-                let ev_clause = format!("referenced_event = x'{}'", ev_str);
-                filter_components.push(ev_clause);
-            }
-        }
-        // Query for referenced pet name pubkey
-        if f.pubkey.is_some() {
-            let pet_str = f.pubkey.as_ref().unwrap();
-            if is_hex(pet_str) {
-                let pet_clause = format!("referenced_pubkey = x'{}'", pet_str);
-                filter_components.push(pet_clause);
-            }
-        }
+        // Query for tags
+        if let Some(map) = &f.tags {
+            for (key, val) in map.iter() {
+                let mut str_vals: Vec<Box<dyn ToSql>> = vec![];
+                let mut blob_vals: Vec<Box<dyn ToSql>> = vec![];
+                for v in val {
+                    if is_hex(v) {
+                        if let Ok(h) = hex::decode(&v) {
+                            blob_vals.push(Box::new(h));
+                        }
+                    } else {
+                        str_vals.push(Box::new(v.to_owned()));
+                    }
+                }
+                // create clauses with "?" params for each tag value being searched
+                let str_clause = format!("value IN ({})", repeat_vars(str_vals.len()));
+                let blob_clause = format!("value_hex IN ({})", repeat_vars(blob_vals.len()));
+                let tag_clause = format!("(name=? AND ({} OR {}))", str_clause, blob_clause);
+                // add the tag name as the first parameter
+                params.push(Box::new(key.to_owned()));
+                // add all tag values that are plain strings as params
+                params.append(&mut str_vals);
+                // add all tag values that are blobs as params
+                params.append(&mut blob_vals);
+                filter_components.push(tag_clause);
+            }
+        }
         // Query for timestamp
@@ -348,21 +477,22 @@ fn query_from_sub(sub: &Subscription) -> String {
             fc.push_str(&filter_components.join(" AND "));
             fc.push_str(" )");
             filter_clauses.push(fc);
-        } else {
-            // never display hidden events
-            filter_clauses.push("hidden!=TRUE".to_owned());
         }
     }

+    // never display hidden events
+    query.push_str(" WHERE hidden!=TRUE ");
+
     // combine all filters with OR clauses, if any exist
     if !filter_clauses.is_empty() {
-        query.push_str(" WHERE ");
+        query.push_str(" AND (");
        query.push_str(&filter_clauses.join(" OR "));
        query.push_str(") ");
     }
     // add order clause
     query.push_str(" ORDER BY created_at ASC");
     debug!("query string: {}", query);
-    query
+    (query, params)
 }

 /// Perform a database query using a subscription.
@@ -373,31 +503,27 @@ fn query_from_sub(sub: &Subscription) -> String {
 /// query is immediately aborted.
 pub async fn db_query(
     sub: Subscription,
+    conn: PooledConnection,
     query_tx: tokio::sync::mpsc::Sender<QueryResult>,
     mut abandon_query_rx: tokio::sync::oneshot::Receiver<()>,
 ) {
     task::spawn_blocking(move || {
-        let config = SETTINGS.read().unwrap();
-        let db_dir = &config.database.data_directory;
-        let full_path = Path::new(db_dir).join(DB_FILE);
-
-        let conn =
-            Connection::open_with_flags(&full_path, OpenFlags::SQLITE_OPEN_READ_ONLY).unwrap();
-        debug!("opened database for reading");
         debug!("going to query for: {:?}", sub);
+        let mut row_count: usize = 0;
+        let start = Instant::now();
         // generate SQL query
-        let q = query_from_sub(&sub);
-        // execute the query
-        let mut stmt = conn.prepare(&q).unwrap();
-        let mut event_rows = stmt.query([]).unwrap();
-        while let Some(row) = event_rows.next().unwrap() {
+        let (q, p) = query_from_sub(&sub);
+        // execute the query. Don't cache, since queries vary so much.
+        let mut stmt = conn.prepare(&q)?;
+        let mut event_rows = stmt.query(rusqlite::params_from_iter(p))?;
+        while let Some(row) = event_rows.next()? {
             // check if this is still active (we could do this every N rows)
             if abandon_query_rx.try_recv().is_ok() {
                 debug!("query aborted");
-                return;
+                return Ok(());
             }
-            // TODO: check before unwrapping
-            let event_json = row.get(0).unwrap();
+            row_count += 1;
+            let event_json = row.get(0)?;
             query_tx
                 .blocking_send(QueryResult {
                     sub_id: sub.get_id(),
@@ -405,6 +531,12 @@ pub async fn db_query(
                 })
                 .ok();
         }
-        debug!("query completed");
+        debug!(
+            "query completed ({} rows) in {:?}",
+            row_count,
+            start.elapsed()
+        );
+        let ok: Result<()> = Ok(());
+        ok
     });
 }
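
The query builder above leans on `hex_range` from src/hexrange.rs,
which this diff does not include. A minimal sketch of what that
prefix-to-range expansion can look like, handling even-length prefixes
only (the real implementation also deals with odd-length prefixes):

```rust
/// Illustrative stand-in for src/hexrange.rs; mirrors the variants
/// used by query_from_sub above.
pub enum HexSearch {
    Exact(Vec<u8>),          // full 64-char hex id/pubkey: match exactly
    Range(Vec<u8>, Vec<u8>), // prefix: lower <= value < upper
    LowerOnly(Vec<u8>),      // prefix of all f's: only a lower bound
}

pub fn hex_range(s: &str) -> Option<HexSearch> {
    // only hex input of at most 64 chars is meaningful here
    if s.len() > 64 || !s.chars().all(|c| c.is_ascii_hexdigit()) {
        return None;
    }
    if s.len() == 64 {
        return hex::decode(s).ok().map(HexSearch::Exact);
    }
    // odd-length prefixes need half-byte handling; omitted in this sketch
    if s.len() % 2 != 0 {
        return None;
    }
    let lower = hex::decode(s).ok()?;
    let mut upper = lower.clone();
    // increment the prefix as a big-endian integer for the exclusive bound
    for i in (0..upper.len()).rev() {
        if upper[i] < 0xff {
            upper[i] += 1;
            return Some(HexSearch::Range(lower, upper));
        }
        upper[i] = 0; // carry into the next byte
    }
    // prefix was all 0xff: no finite upper bound at this width
    Some(HexSearch::LowerOnly(lower))
}
```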
35
src/error.rs
35
src/error.rs
@@ -40,6 +40,41 @@ pub enum Error {
    ConfigError(config::ConfigError),
    #[error("Data directory does not exist")]
    DatabaseDirError,
    #[error("Database Connection Pool Error")]
    DatabasePoolError(r2d2::Error),
    #[error("Custom Error : {0}")]
    CustomError(String),
    #[error("Task join error")]
    JoinError,
    #[error("Hyper Client error")]
    HyperError(hyper::Error),
    #[error("Unknown/Undocumented")]
    UnknownError,
}

//impl From<Box<dyn std::error::Error>> for Error {
//    fn from(e: Box<dyn std::error::Error>) -> Self {
//        Error::CustomError("error".to_owned())
//    }
//}

impl From<hyper::Error> for Error {
    fn from(h: hyper::Error) -> Self {
        Error::HyperError(h)
    }
}

impl From<r2d2::Error> for Error {
    fn from(d: r2d2::Error) -> Self {
        Error::DatabasePoolError(d)
    }
}

impl From<tokio::task::JoinError> for Error {
    /// Wrap task join error
    fn from(_j: tokio::task::JoinError) -> Self {
        Error::JoinError
    }
}

impl From<rusqlite::Error> for Error {
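These added variants and `From` impls are what make `?` work across subsystems: a failed pool checkout, a hyper request, or a joined task all convert into the crate-wide `Error`. A small hedged illustration (the helper function itself is hypothetical):

```rust
use nostr_rs_relay::error::Result;

// Hypothetical helper: each fallible call below returns a different error
// type, and the From impls above let a bare `?` lift them into Error.
fn checkpoint(pool: &r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>) -> Result<()> {
    let conn = pool.get()?; // r2d2::Error -> Error::DatabasePoolError
    conn.execute_batch("PRAGMA wal_checkpoint;")?; // rusqlite::Error -> Error::SqlError
    Ok(())
}
```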
142 src/event.rs
@@ -2,6 +2,8 @@
use crate::config;
use crate::error::Error::*;
use crate::error::Result;
use crate::nip05;
use crate::utils::unix_time;
use bitcoin_hashes::{sha256, Hash};
use lazy_static::lazy_static;
use log::*;
@@ -9,21 +11,23 @@ use secp256k1::{schnorr, Secp256k1, VerifyOnly, XOnlyPublicKey};
use serde::{Deserialize, Deserializer, Serialize};
use serde_json::value::Value;
use serde_json::Number;
use std::collections::HashMap;
use std::collections::HashSet;
use std::str::FromStr;
use std::time::SystemTime;

lazy_static! {
    /// Secp256k1 verification instance.
    pub static ref SECP: Secp256k1<VerifyOnly> = Secp256k1::verification_only();
}

/// Event command in network format
/// Event command in network format.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct EventCmd {
    cmd: String, // expecting static "EVENT"
    event: Event,
}

/// Event parsed
/// Parsed nostr event.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct Event {
    pub id: String,
@@ -35,6 +39,9 @@ pub struct Event {
    pub(crate) tags: Vec<Vec<String>>,
    pub(crate) content: String,
    pub(crate) sig: String,
    // Optimization for tag search, built on demand
    #[serde(skip)]
    pub(crate) tagidx: Option<HashMap<String, HashSet<String>>>,
}

/// Simple tag type for array of array of strings.
@@ -56,26 +63,66 @@ impl From<EventCmd> for Result<Event> {
        if ec.cmd != "EVENT" {
            Err(CommandUnknownError)
        } else if ec.event.is_valid() {
            Ok(ec.event)
            let mut e = ec.event;
            e.build_index();
            Ok(e)
        } else {
            Err(EventInvalid)
        }
    }
}

/// Seconds since 1970
fn unix_time() -> u64 {
    SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
        .map(|x| x.as_secs())
        .unwrap_or(0)
}

impl Event {
    pub fn is_kind_metadata(&self) -> bool {
        self.kind == 0
    }

    /// Pull a NIP-05 Name out of the event, if one exists
    pub fn get_nip05_addr(&self) -> Option<nip05::Nip05Name> {
        if self.is_kind_metadata() {
            // very quick check if we should attempt to parse this json
            if self.content.contains("\"nip05\"") {
                // Parse into JSON
                let md_parsed: Value = serde_json::from_str(&self.content).ok()?;
                let md_map = md_parsed.as_object()?;
                let nip05_str = md_map.get("nip05")?.as_str()?;
                return nip05::Nip05Name::try_from(nip05_str).ok();
            }
        }
        None
    }

    /// Build an event tag index
    fn build_index(&mut self) {
        // if there are no tags; just leave the index as None
        if self.tags.is_empty() {
            return;
        }
        // otherwise, build an index
        let mut idx: HashMap<String, HashSet<String>> = HashMap::new();
        // iterate over tags that have at least 2 elements
        for t in self.tags.iter().filter(|x| x.len() > 1) {
            let tagname = t.get(0).unwrap();
            let tagval = t.get(1).unwrap();
            // ensure a vector exists for this tag
            if !idx.contains_key(tagname) {
                idx.insert(tagname.clone(), HashSet::new());
            }
            // get the tag vec and insert entry
            let tidx = idx.get_mut(tagname).expect("could not get tag vector");
            tidx.insert(tagval.clone());
        }
        // save the tag structure
        self.tagidx = Some(idx);
    }

    /// Create a short event identifier, suitable for logging.
    pub fn get_event_id_prefix(&self) -> String {
        self.id.chars().take(8).collect()
    }
    pub fn get_author_prefix(&self) -> String {
        self.pubkey.chars().take(8).collect()
    }

    /// Check if this event has a valid signature.
    fn is_valid(&self) -> bool {
@@ -102,7 +149,7 @@ impl Event {
        // * serialize with no spaces/newlines
        let c_opt = self.to_canonical();
        if c_opt.is_none() {
            info!("event could not be canonicalized");
            debug!("event could not be canonicalized");
            return false;
        }
        let c = c_opt.unwrap();
@@ -111,16 +158,22 @@ impl Event {
        let hex_digest = format!("{:x}", digest);
        // * ensure the id matches the computed sha256sum.
        if self.id != hex_digest {
            debug!("event id does not match digest");
            return false;
        }
        // * validate the message digest (sig) using the pubkey & computed sha256 message hash.

        let sig = schnorr::Signature::from_str(&self.sig).unwrap();
        if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) {
            let pubkey = XOnlyPublicKey::from_str(&self.pubkey).unwrap();
            let verify = SECP.verify_schnorr(&sig, &msg, &pubkey);
            matches!(verify, Ok(()))
            if let Ok(pubkey) = XOnlyPublicKey::from_str(&self.pubkey) {
                let verify = SECP.verify_schnorr(&sig, &msg, &pubkey);
                matches!(verify, Ok(()))
            } else {
                debug!("Client sent malformed pubkey");
                false
            }
        } else {
            warn!("Error converting digest to secp256k1 message");
            info!("Error converting digest to secp256k1 message");
            false
        }
    }
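With this hunk, is_valid now degrades gracefully on a malformed pubkey instead of panicking, but the id check itself is unchanged: the event id must be the lowercase hex SHA-256 of the canonical serialization. That half of the check, sketched in isolation:

```rust
use bitcoin_hashes::{sha256, Hash};

// Given the canonical JSON array produced by to_canonical(), the event id
// is the lowercase hex encoding of its SHA-256 digest.
fn compute_event_id(canonical: &str) -> String {
    let digest: sha256::Hash = sha256::Hash::hash(canonical.as_bytes());
    format!("{:x}", digest)
}
```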
@@ -162,36 +215,18 @@ impl Event {
        serde_json::Value::Array(tags)
    }

    /// Get a list of event tags.
    pub fn get_event_tags(&self) -> Vec<&str> {
        let mut etags = vec![];
        for t in self.tags.iter() {
            if t.len() >= 2 && t.get(0).unwrap() == "e" {
                etags.push(&t.get(1).unwrap()[..]);
            }
    /// Determine if the given tag and value set intersect with tags in this event.
    pub fn generic_tag_val_intersect(&self, tagname: &str, check: &HashSet<String>) -> bool {
        match &self.tagidx {
            Some(idx) => match idx.get(tagname) {
                Some(valset) => {
                    let common = valset.intersection(check);
                    common.count() > 0
                }
                None => false,
            },
            None => false,
        }
        etags
    }

    /// Get a list of pubkey/petname tags.
    pub fn get_pubkey_tags(&self) -> Vec<&str> {
        let mut ptags = vec![];
        for t in self.tags.iter() {
            if t.len() >= 2 && t.get(0).unwrap() == "p" {
                ptags.push(&t.get(1).unwrap()[..]);
            }
        }
        ptags
    }

    /// Check if a given event is referenced in an event tag.
    pub fn event_tag_match(&self, eventid: &str) -> bool {
        self.get_event_tags().contains(&eventid)
    }

    /// Check if a given event is referenced in an event tag.
    pub fn pubkey_tag_match(&self, pubkey: &str) -> bool {
        self.get_pubkey_tags().contains(&pubkey)
    }
}

@@ -207,6 +242,7 @@ mod tests {
            tags: vec![],
            content: "".to_owned(),
            sig: "0".to_owned(),
            tagidx: None,
        }
    }

@@ -229,7 +265,8 @@ mod tests {
    #[test]
    fn empty_event_tag_match() -> Result<()> {
        let event = simple_event();
        assert!(!event.event_tag_match("foo"));
        assert!(!event
            .generic_tag_val_intersect("e", &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
        Ok(())
    }

@@ -237,7 +274,14 @@ mod tests {
    fn single_event_tag_match() -> Result<()> {
        let mut event = simple_event();
        event.tags = vec![vec!["e".to_owned(), "foo".to_owned()]];
        assert!(event.event_tag_match("foo"));
        event.build_index();
        assert_eq!(
            event.generic_tag_val_intersect(
                "e",
                &HashSet::from(["foo".to_owned(), "bar".to_owned()])
            ),
            true
        );
        Ok(())
    }

@@ -281,6 +325,7 @@ mod tests {
            tags: vec![],
            content: "this is a test".to_owned(),
            sig: "abcde".to_owned(),
            tagidx: None,
        };
        let c = e.to_canonical();
        let expected = Some(r#"[0,"012345",501234,1,[],"this is a test"]"#.to_owned());
@@ -304,6 +349,7 @@ mod tests {
            ],
            content: "this is a test".to_owned(),
            sig: "abcde".to_owned(),
            tagidx: None,
        };
        let c = e.to_canonical();
        let expected_json = r###"[0,"012345",501234,1,[["#e","aoeu"],["#p","aaaa","ws://example.com"]],"this is a test"]"###;
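The net effect of this file: the linear get_event_tags/get_pubkey_tags scans are gone, and subscription matching becomes a HashSet intersection against the prebuilt tagidx, as the updated tests show. A hedged usage sketch:

```rust
use std::collections::HashSet;
use nostr_rs_relay::event::Event;

// Sketch: a REQ filter like {"#e": ["foo", "bar"]} reduces to one set
// intersection per tag name, assuming build_index() already ran on the event.
fn matches_e_filter(event: &Event, wanted: &[&str]) -> bool {
    let interest: HashSet<String> = wanted.iter().map(|s| s.to_string()).collect();
    event.generic_tag_val_intersect("e", &interest)
}
```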
158 src/hexrange.rs (new file)
@@ -0,0 +1,158 @@
//! Utilities for searching hexadecimal
use crate::utils::is_hex;
use hex;

/// Types of hexadecimal queries.
#[derive(PartialEq, Debug, Clone)]
pub enum HexSearch {
    // when no range is needed, exact 32-byte
    Exact(Vec<u8>),
    // lower (inclusive) and upper range (exclusive)
    Range(Vec<u8>, Vec<u8>),
    // lower bound only, upper bound is MAX inclusive
    LowerOnly(Vec<u8>),
}

/// Check if a string contains only f chars
fn is_all_fs(s: &str) -> bool {
    s.chars().all(|x| x == 'f' || x == 'F')
}

/// Find the next hex sequence greater than the argument.
pub fn hex_range(s: &str) -> Option<HexSearch> {
    // handle special cases
    if !is_hex(s) || s.len() > 64 {
        return None;
    }
    if s.len() == 64 {
        return Some(HexSearch::Exact(hex::decode(s).ok()?));
    }
    // if s is odd, add a zero
    let mut hash_base = s.to_owned();
    let mut odd = hash_base.len() % 2 != 0;
    if odd {
        // extend the string to make it even
        hash_base.push('0');
    }
    let base = hex::decode(hash_base).ok()?;
    // check for all ff's
    if is_all_fs(s) {
        // there is no higher bound, we only want to search for blobs greater than this.
        return Some(HexSearch::LowerOnly(base));
    }

    // return a range
    let mut upper = base.clone();
    let mut byte_len = upper.len();

    // for odd strings, we made them longer, but we want to increment the upper char (+16).
    // we know we can do this without overflowing because we explicitly set the bottom half to 0's.
    while byte_len > 0 {
        byte_len -= 1;
        // check if byte can be incremented, or if we need to carry.
        let b = upper[byte_len];
        if b == u8::MAX {
            // reset and carry
            upper[byte_len] = 0;
        } else if odd {
            // check if first char in this byte is NOT 'f'
            if b < 240 {
                upper[byte_len] = b + 16; // bump up the first character in this byte
                // increment done, stop iterating through the vec
                break;
            } else {
                // if it is 'f', reset the byte to 0 and do a carry
                // reset and carry
                upper[byte_len] = 0;
            }
            // done with odd logic, so don't repeat this
            odd = false;
        } else {
            // bump up the first character in this byte
            upper[byte_len] = b + 1;
            // increment done, stop iterating
            break;
        }
    }
    Some(HexSearch::Range(base, upper))
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn hex_range_exact() -> Result<()> {
        let hex = "abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00";
        let r = hex_range(hex);
        assert_eq!(
            r,
            Some(HexSearch::Exact(hex::decode(hex).expect("invalid hex")))
        );
        Ok(())
    }
    #[test]
    fn hex_full_range() -> Result<()> {
        let hex = "aaaa";
        let hex_upper = "aaab";
        let r = hex_range(hex);
        assert_eq!(
            r,
            Some(HexSearch::Range(
                hex::decode(hex).expect("invalid hex"),
                hex::decode(hex_upper).expect("invalid hex")
            ))
        );
        Ok(())
    }

    #[test]
    fn hex_full_range_odd() -> Result<()> {
        let r = hex_range("abc");
        assert_eq!(
            r,
            Some(HexSearch::Range(
                hex::decode("abc0").expect("invalid hex"),
                hex::decode("abd0").expect("invalid hex")
            ))
        );
        Ok(())
    }

    #[test]
    fn hex_full_range_odd_end_f() -> Result<()> {
        let r = hex_range("abf");
        assert_eq!(
            r,
            Some(HexSearch::Range(
                hex::decode("abf0").expect("invalid hex"),
                hex::decode("ac00").expect("invalid hex")
            ))
        );
        Ok(())
    }

    #[test]
    fn hex_no_upper() -> Result<()> {
        let r = hex_range("ffff");
        assert_eq!(
            r,
            Some(HexSearch::LowerOnly(
                hex::decode("ffff").expect("invalid hex")
            ))
        );
        Ok(())
    }

    #[test]
    fn hex_no_upper_odd() -> Result<()> {
        let r = hex_range("fff");
        assert_eq!(
            r,
            Some(HexSearch::LowerOnly(
                hex::decode("fff0").expect("invalid hex")
            ))
        );
        Ok(())
    }
}
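hex_range is what turns a short hex prefix in a filter into a range scan over 32-byte BLOB columns: "abc" is padded to the lower bound abc0 and the exclusive upper bound abd0 is produced by bumping the high nibble, with carries handled as in the abf -> ac00 test. A query builder can then map each variant onto a clause shape (the column name here is illustrative):

```rust
use nostr_rs_relay::hexrange::{hex_range, HexSearch};

// Sketch: convert a (possibly short, possibly odd-length) hex prefix into
// a clause shape; the caller pushes the decoded bounds as blob parameters.
fn author_clause(prefix: &str) -> Option<&'static str> {
    Some(match hex_range(prefix)? {
        HexSearch::Exact(_) => "author = ?",
        HexSearch::Range(_, _) => "(author >= ? AND author < ?)",
        HexSearch::LowerOnly(_) => "author >= ?",
    })
}
```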
43 src/info.rs (new file)
@@ -0,0 +1,43 @@
//! Relay metadata using NIP-11
/// Relay Info
use crate::config;
use serde::{Deserialize, Serialize};

pub const CARGO_PKG_VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");

#[derive(Debug, Serialize, Deserialize)]
#[allow(unused)]
pub struct RelayInfo {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pubkey: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub contact: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub supported_nips: Option<Vec<i64>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub software: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
}

/// Convert an Info configuration into public Relay Info
impl From<config::Info> for RelayInfo {
    fn from(i: config::Info) -> Self {
        RelayInfo {
            id: i.relay_url,
            name: i.name,
            description: i.description,
            pubkey: i.pubkey,
            contact: i.contact,
            supported_nips: Some(vec![1, 2, 11]),
            software: Some("https://git.sr.ht/~gheartsfield/nostr-rs-relay".to_owned()),
            version: CARGO_PKG_VERSION.map(|x| x.to_owned()),
        }
    }
}
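Because every field is wrapped with skip_serializing_if, options left unset in config.toml vanish from the NIP-11 JSON document rather than appearing as nulls. The root handler in main.rs (below) builds the response roughly like this hedged sketch:

```rust
use nostr_rs_relay::config;
use nostr_rs_relay::info::RelayInfo;

// Sketch of the NIP-11 body construction used for application/nostr+json
// requests; None fields are omitted entirely by skip_serializing_if.
fn relay_info_json(info: config::Info) -> String {
    let rinfo = RelayInfo::from(info);
    serde_json::to_string_pretty(&rinfo).expect("RelayInfo serializes")
}
```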
src/lib.rs
@@ -4,5 +4,9 @@ pub mod conn;
pub mod db;
pub mod error;
pub mod event;
pub mod protostream;
pub mod hexrange;
pub mod info;
pub mod nip05;
pub mod schema;
pub mod subscription;
pub mod utils;
340 src/main.rs
@@ -1,6 +1,7 @@
//! Server process
use futures::SinkExt;
use futures::StreamExt;
use hyper::header::ACCEPT;
use hyper::service::{make_service_fn, service_fn};
use hyper::upgrade::Upgraded;
use hyper::{
@@ -8,46 +9,57 @@ use hyper::{
};
use log::*;
use nostr_rs_relay::close::Close;
use nostr_rs_relay::close::CloseCmd;
use nostr_rs_relay::config;
use nostr_rs_relay::conn;
use nostr_rs_relay::db;
use nostr_rs_relay::db::SubmittedEvent;
use nostr_rs_relay::error::{Error, Result};
use nostr_rs_relay::event::Event;
use nostr_rs_relay::protostream;
use nostr_rs_relay::protostream::NostrMessage::*;
use nostr_rs_relay::protostream::NostrResponse::*;
use nostr_rs_relay::event::EventCmd;
use nostr_rs_relay::info::RelayInfo;
use nostr_rs_relay::nip05;
use nostr_rs_relay::subscription::Subscription;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::convert::Infallible;
use std::env;
use std::net::SocketAddr;
use std::path::Path;
use std::time::Duration;
use std::time::Instant;
use tokio::runtime::Builder;
use tokio::sync::broadcast;
use tokio::sync::broadcast::{Receiver, Sender};
use tokio::sync::broadcast::{self, Receiver, Sender};
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio_tungstenite::WebSocketStream;
use tungstenite::error::Error as WsError;
use tungstenite::handshake;
use tungstenite::protocol::Message;
use tungstenite::protocol::WebSocketConfig;

/// Return a requested DB name from command line arguments.
fn db_from_args(args: Vec<String>) -> Option<String> {
    if args.len() == 3 && args.get(1) == Some(&"--db".to_owned()) {
        return args.get(2).map(|x| x.to_owned());
    }
    None
}

/// Handle arbitrary HTTP requests, including for WebSocket upgrades.
async fn handle_web_request(
    mut request: Request<Body>,
    pool: db::SqlitePool,
    remote_addr: SocketAddr,
    broadcast: Sender<Event>,
    event_tx: tokio::sync::mpsc::Sender<Event>,
    event_tx: tokio::sync::mpsc::Sender<SubmittedEvent>,
    shutdown: Receiver<()>,
) -> Result<Response<Body>, Infallible> {
    match (
        request.uri().path(),
        request.headers().contains_key(header::UPGRADE),
    ) {
        // if the request is for / and the request headers contain an Upgrade key
        // Request for / as websocket
        ("/", true) => {
            debug!("websocket with upgrade request");
            //assume request is a handshake, so create the handshake response
@@ -62,17 +74,25 @@ async fn handle_web_request(
            match upgrade::on(&mut request).await {
                //if successfully upgraded
                Ok(upgraded) => {
                    // set WebSocket configuration options
                    let mut config = WebSocketConfig::default();
                    {
                        let settings = config::SETTINGS.read().unwrap();
                        config.max_message_size = settings.limits.max_ws_message_bytes;
                        config.max_frame_size = settings.limits.max_ws_frame_bytes;
                    }
                    //create a websocket stream from the upgraded object
                    let ws_stream = WebSocketStream::from_raw_socket(
                        //pass the upgraded object
                        //as the base layer stream of the Websocket
                        upgraded,
                        tokio_tungstenite::tungstenite::protocol::Role::Server,
                        None,
                        Some(config),
                    )
                    .await;

                    tokio::spawn(nostr_server(
                        ws_stream, broadcast, event_tx, shutdown,
                        pool, ws_stream, broadcast, event_tx, shutdown,
                    ));
                }
                Err(e) => println!(
@@ -96,10 +116,30 @@ async fn handle_web_request(
            };
            Ok::<_, Infallible>(response)
        }
        // Request for Relay info
        ("/", false) => {
            // handle request at root with no upgrade header
            // Check if this is a nostr server info request
            let accept_header = &request.headers().get(ACCEPT);
            // check if application/nostr+json is included
            if let Some(media_types) = accept_header {
                if let Ok(mt_str) = media_types.to_str() {
                    if mt_str.contains("application/nostr+json") {
                        let config = config::SETTINGS.read().unwrap();
                        // build a relay info response
                        debug!("Responding to server info request");
                        let rinfo = RelayInfo::from(config.info.clone());
                        let b = Body::from(serde_json::to_string_pretty(&rinfo).unwrap());
                        return Ok(Response::builder()
                            .status(200)
                            .header("Content-Type", "application/nostr+json")
                            .body(b)
                            .unwrap());
                    }
                }
            }
            Ok(Response::new(Body::from(
                "This is a Nostr relay.\n".to_string(),
                "Please use a Nostr client to connect.",
            )))
        }
        (_, _) => {
@@ -137,15 +177,45 @@ fn main() -> Result<(), Error> {
        *settings = c;
    }

    let config = config::SETTINGS.read().unwrap();
    let settings = config::SETTINGS.read().unwrap();
    trace!("Config: {:?}", settings);
    // do some config validation.
    if !Path::new(&config.database.data_directory).is_dir() {
    if !Path::new(&settings.database.data_directory).is_dir() {
        error!("Database directory does not exist");
        return Err(Error::DatabaseDirError);
    }
    debug!("config: {:?}", config);
    let addr = format!("{}:{}", config.network.address.trim(), config.network.port);
    let addr = format!(
        "{}:{}",
        settings.network.address.trim(),
        settings.network.port
    );
    let socket_addr = addr.parse().expect("listening address not valid");
    // address whitelisting settings
    if let Some(addr_whitelist) = &settings.authorization.pubkey_whitelist {
        info!(
            "Event publishing restricted to {} pubkey(s)",
            addr_whitelist.len()
        );
    }
    // check if NIP-05 enforced user verification is on
    if settings.verified_users.is_active() {
        info!(
            "NIP-05 user verification mode:{:?}",
            settings.verified_users.mode
        );
        if let Some(d) = settings.verified_users.verify_update_duration() {
            info!("NIP-05 check user verification every: {:?}", d);
        }
        if let Some(d) = settings.verified_users.verify_expiration_duration() {
            info!("NIP-05 user verification expires after: {:?}", d);
        }
        if let Some(wl) = &settings.verified_users.domain_whitelist {
            info!("NIP-05 domain whitelist: {:?}", wl);
        }
        if let Some(bl) = &settings.verified_users.domain_blacklist {
            info!("NIP-05 domain blacklist: {:?}", bl);
        }
    }
    // configure tokio runtime
    let rt = Builder::new_multi_thread()
        .enable_all()
@@ -163,25 +233,62 @@ fn main() -> Result<(), Error> {
        let (bcast_tx, _) = broadcast::channel::<Event>(settings.limits.broadcast_buffer);
        // validated events that need to be persisted are sent to the
        // database via this channel.
        let (event_tx, event_rx) = mpsc::channel::<Event>(settings.limits.event_persist_buffer);
        let (event_tx, event_rx) =
            mpsc::channel::<SubmittedEvent>(settings.limits.event_persist_buffer);
        // establish a channel for letting all threads know about a
        // requested server shutdown.
        let (invoke_shutdown, _) = broadcast::channel::<()>(1);
        let ctrl_c_shutdown = invoke_shutdown.clone();
        let (invoke_shutdown, shutdown_listen) = broadcast::channel::<()>(1);
        // create a channel for sending any new metadata event. These
        // will get processed relatively slowly (a potentially
        // multi-second blocking HTTP call) on a single thread, so we
        // buffer requests on the channel. No harm in dropping events
        // here, since we are protecting against DoS. This can make
        // it difficult to set up initial metadata in bulk, since
        // overwhelming this will drop events and won't register
        // metadata events.
        let (metadata_tx, metadata_rx) = broadcast::channel::<Event>(4096);
        // start the database writer thread. Give it a channel for
        // writing events, and for publishing events that have been
        // written (to all connected clients).
        db::db_writer(
            event_rx,
            bcast_tx.clone(),
            metadata_tx.clone(),
            shutdown_listen,
        )
        .await;
        info!("db writer created");

        // create a nip-05 verifier thread
        let verifier_opt = nip05::Verifier::new(metadata_rx, bcast_tx.clone());
        if let Ok(mut v) = verifier_opt {
            if settings.verified_users.is_active() {
                tokio::task::spawn(async move {
                    info!("starting up NIP-05 verifier...");
                    v.run().await;
                });
            }
        }
        // listen for ctrl-c interrupts
        let ctrl_c_shutdown = invoke_shutdown.clone();
        tokio::spawn(async move {
            tokio::signal::ctrl_c().await.unwrap();
            info!("shutting down due to SIGINT");
            ctrl_c_shutdown.send(()).ok();
        });
        // start the database writer thread. Give it a channel for
        // writing events, and for publishing events that have been
        // written (to all connected clients).
        db::db_writer(event_rx, bcast_tx.clone(), invoke_shutdown.subscribe()).await;
        info!("db writer created");
        // build a connection pool for sqlite connections
        let pool = db::build_pool(
            "client query",
            rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY
                | rusqlite::OpenFlags::SQLITE_OPEN_SHARED_CACHE,
            settings.database.min_conn,
            settings.database.max_conn,
            true,
        );
        // A `Service` is needed for every connection, so this
        // creates one from our `handle_request` function.
        let make_svc = make_service_fn(|conn: &AddrStream| {
            let svc_pool = pool.clone();
            let remote_addr = conn.remote_addr();
            let bcast = bcast_tx.clone();
            let event = event_tx.clone();
@@ -191,6 +298,7 @@ fn main() -> Result<(), Error> {
            Ok::<_, Infallible>(service_fn(move |request: Request<Body>| {
                handle_web_request(
                    request,
                    svc_pool.clone(),
                    remote_addr,
                    bcast.clone(),
                    event.clone(),
@@ -211,55 +319,113 @@ fn main() -> Result<(), Error> {
    Ok(())
}

/// Nostr protocol messages from a client
#[derive(Deserialize, Serialize, Clone, PartialEq, Debug)]
#[serde(untagged)]
pub enum NostrMessage {
    /// An `EVENT` message
    EventMsg(EventCmd),
    /// A `REQ` message
    SubMsg(Subscription),
    /// A `CLOSE` message
    CloseMsg(CloseCmd),
}

/// Convert Message to NostrMessage
fn convert_to_msg(msg: String) -> Result<NostrMessage> {
    let config = config::SETTINGS.read().unwrap();
    let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
    match parsed_res {
        Ok(m) => {
            if let NostrMessage::EventMsg(_) = m {
                if let Some(max_size) = config.limits.max_event_bytes {
                    // check length, ensure that some max size is set.
                    if msg.len() > max_size && max_size > 0 {
                        return Err(Error::EventMaxLengthError(msg.len()));
                    }
                }
            }
            Ok(m)
        }
        Err(e) => {
            debug!("proto parse error: {:?}", e);
            debug!("parse error on message: {}", msg.trim());
            Err(Error::ProtoParseError)
        }
    }
}
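Since NostrMessage is #[serde(untagged)], a single serde_json::from_str call tries each variant's shape in order against the incoming JSON array, which is how convert_to_msg dispatches EVENT, REQ, and CLOSE frames. A reduced, self-contained miniature of that mechanism (variants simplified to plain tuples purely for illustration):

```rust
use serde::Deserialize;

// Miniature of the untagged dispatch in convert_to_msg: serde tries each
// variant in order until one matches the array's arity and element types.
#[derive(Deserialize, Debug)]
#[serde(untagged)]
enum Frame {
    Close((String, String)),                  // ["CLOSE", "<sub id>"]
    Req((String, String, serde_json::Value)), // ["REQ", "<sub id>", {filter}]
}

fn main() {
    let close: Frame = serde_json::from_str(r#"["CLOSE","sub-1"]"#).unwrap();
    let req: Frame = serde_json::from_str(r#"["REQ","sub-1",{"kinds":[1]}]"#).unwrap();
    println!("{:?} / {:?}", close, req);
}
```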

/// Handle new client connections. This runs through an event loop
/// for all client communication.
async fn nostr_server(
    ws_stream: WebSocketStream<Upgraded>,
    pool: db::SqlitePool,
    mut ws_stream: WebSocketStream<Upgraded>,
    broadcast: Sender<Event>,
    event_tx: tokio::sync::mpsc::Sender<Event>,
    event_tx: mpsc::Sender<SubmittedEvent>,
    mut shutdown: Receiver<()>,
) {
    // get a broadcast channel for clients to communicate on
    let mut bcast_rx = broadcast.subscribe();
    let mut config = WebSocketConfig::default();
    {
        let settings = config::SETTINGS.read().unwrap();
        config.max_message_size = settings.limits.max_ws_message_bytes;
        config.max_frame_size = settings.limits.max_ws_frame_bytes;
    }
    // upgrade the TCP connection to WebSocket
    //let conn = tokio_tungstenite::accept_async_with_config(stream, Some(config)).await;
    //let ws_stream = conn.expect("websocket handshake error");
    // wrap websocket into a stream & sink of Nostr protocol messages
    let mut nostr_stream = protostream::wrap_ws_in_nostr(ws_stream);
    // Track internal client state
    let mut conn = conn::ClientConn::new();
    let cid = conn.get_client_prefix();
    // Create a channel for receiving query results from the database.
    // we will send out the tx handle to any query we generate.
    let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(256);
    // Create channel for receiving NOTICEs
    let (notice_tx, mut notice_rx) = mpsc::channel::<String>(32);

    // maintain a hashmap of a oneshot channel for active subscriptions.
    // when these subscriptions are cancelled, make a message
    // available to the executing query so it knows to stop.
    //let (abandon_query_tx, _) = oneshot::channel::<()>();

    // last time this client sent data
    let mut last_message_time = Instant::now();

    // ping interval (every 5 minutes)
    let default_ping_dur = Duration::from_secs(300);

    // disconnect after 20 minutes without a ping response or event.
    let max_quiet_time = Duration::from_secs(60 * 20);

    let start = tokio::time::Instant::now() + default_ping_dur;
    let mut ping_interval = tokio::time::interval_at(start, default_ping_dur);

    let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
    // for stats, keep track of how many events the client published,
    // and how many it received from queries.
    let mut client_published_event_count: usize = 0;
    let mut client_received_event_count: usize = 0;
    info!("new connection for client: {}", cid);
    info!("new connection for client: {:?}", cid);
    loop {
        tokio::select! {
            _ = shutdown.recv() => {
                // server shutting down, exit loop
                break;
            },
            _ = ping_interval.tick() => {
                // check how long since we talked to client
                // if it has been too long, disconnect
                if last_message_time.elapsed() > max_quiet_time {
                    debug!("ending connection due to lack of client ping response");
                    break;
                }
                // Send a ping
                ws_stream.send(Message::Ping(Vec::new())).await.ok();
            },
            Some(notice_msg) = notice_rx.recv() => {
                let n = notice_msg.to_string().replace("\"", "");
                ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", n))).await.ok();
            },
            Some(query_result) = query_rx.recv() => {
                // database informed us of a query result we asked for
                let res = EventRes(query_result.sub_id,query_result.event);
                client_received_event_count += 1;
                nostr_stream.send(res).await.ok();
                // send a result
                let subesc = query_result.sub_id.replace("\"", "");
                let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
                ws_stream.send(Message::Text(send_str)).await.ok();
            },
            // TODO: consider logging the LaggedRecv error
            Ok(global_event) = bcast_rx.recv() => {
                // an event has been broadcast to all clients
                // first check if there is a subscription for this event.
@@ -268,39 +434,66 @@ async fn nostr_server(
                    // TODO: serialize at broadcast time, instead of
                    // once for each consumer.
                    if let Ok(event_str) = serde_json::to_string(&global_event) {
                        debug!("sub match: client: {}, sub: {}, event: {}",
                        debug!("sub match: client: {:?}, sub: {:?}, event: {:?}",
                               cid, s,
                               global_event.get_event_id_prefix());
                        // create an event response and send it
                        let res = EventRes(s.to_owned(),event_str);
                        nostr_stream.send(res).await.ok();
                        let subesc = s.replace("\"", "");
                        ws_stream.send(Message::Text(format!("[\"EVENT\",\"{}\",{}]", subesc, event_str))).await.ok();
                        //nostr_stream.send(res).await.ok();
                    } else {
                        warn!("could not convert event to string");
                        warn!("could not serialize event {:?}", global_event.get_event_id_prefix());
                    }
                }
            },
            // check if this client has a subscription
            proto_next = nostr_stream.next() => {
                match proto_next {
                    Some(Ok(EventMsg(ec))) => {
            ws_next = ws_stream.next() => {
                // update most recent message time for client
                last_message_time = Instant::now();
                // Consume text messages from the client, parse into Nostr messages.
                let nostr_msg = match ws_next {
                    Some(Ok(Message::Text(m))) => {
                        convert_to_msg(m)
                    },
                    Some(Ok(Message::Binary(_))) => {
                        ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "binary messages are not accepted"))).await.ok();
                        continue;
                    },
                    Some(Ok(Message::Ping(_))) | Some(Ok(Message::Pong(_))) => {
                        // get a ping/pong, ignore
                        continue;
                    },
                    None | Some(Ok(Message::Close(_))) | Some(Err(WsError::AlreadyClosed)) | Some(Err(WsError::ConnectionClosed)) => {
                        debug!("normal websocket close from client: {:?}",cid);
                        break;
                    },
                    x => {
                        info!("message was: {:?} (ignoring)", x);
                        continue;
                    }
                };

                // convert ws_next into proto_next
                match nostr_msg {
                    Ok(NostrMessage::EventMsg(ec)) => {
                        // An EventCmd needs to be validated to be converted into an Event
                        // handle each type of message
                        let parsed : Result<Event> = Result::<Event>::from(ec);
                        match parsed {
                            Ok(e) => {
                                let id_prefix:String = e.id.chars().take(8).collect();
                                debug!("successfully parsed/validated event: {} from client: {}", id_prefix, cid);
                                // Write this to the database
                                event_tx.send(e.clone()).await.ok();
                                debug!("successfully parsed/validated event: {:?} from client: {:?}", id_prefix, cid);
                                // Write this to the database.
                                let submit_event = SubmittedEvent { event: e.clone(), notice_tx: notice_tx.clone() };
                                event_tx.send(submit_event).await.ok();
                                client_published_event_count += 1;
                            },
                            Err(_) => {
                                info!("client {} sent an invalid event", cid);
                                nostr_stream.send(NoticeRes("event was invalid".to_owned())).await.ok();
                                info!("client {:?} sent an invalid event", cid);
                                ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "event was invalid"))).await.ok();
                            }
                        }
                    },
                    Some(Ok(SubMsg(s))) => {
                    Ok(NostrMessage::SubMsg(s)) => {
                        debug!("client {} requesting a subscription", cid);
                        // subscription handling consists of:
                        // * registering the subscription so future events can be matched
@@ -309,18 +502,23 @@ async fn nostr_server(
                        let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
                        match conn.subscribe(s.clone()) {
                            Ok(()) => {
                                running_queries.insert(s.id.to_owned(), abandon_query_tx);
                                // when we insert, if there was a previous query running with the same name, cancel it.
                                if let Some(previous_query) = running_queries.insert(s.id.to_owned(), abandon_query_tx) {
                                    previous_query.send(()).ok();
                                }
                                // start a database query
                                db::db_query(s, query_tx.clone(), abandon_query_rx).await;
                                // show pool stats
                                debug!("DB pool stats: {:?}", pool.state());
                                db::db_query(s, pool.get().expect("could not get connection"), query_tx.clone(), abandon_query_rx).await;
                            },
                            Err(e) => {
                                info!("Subscription error: {}", e);
                                nostr_stream.send(NoticeRes(format!("{}",e))).await.ok();

                                let s = e.to_string().replace("\"", "");
                                ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", s))).await.ok();
                            }
                        }
                    },
                    Some(Ok(CloseMsg(cc))) => {
                    Ok(NostrMessage::CloseMsg(cc)) => {
                        // closing a request simply removes the subscription.
                        let parsed : Result<Close> = Result::<Close>::from(cc);
                        match parsed {
@@ -337,24 +535,24 @@ async fn nostr_server(
                    },
                    Err(_) => {
                        info!("invalid command ignored");

                        ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "could not parse command"))).await.ok();
                    }
                }
            },
            None => {
                debug!("normal websocket close from client: {}",cid);
                break;
            },
            Some(Err(Error::ConnError)) => {
                debug!("got connection close/error, disconnecting client: {}",cid);
            Err(Error::ConnError) => {
                debug!("got connection close/error, disconnecting client: {:?}",cid);
                break;
            }
            Some(Err(Error::EventMaxLengthError(s))) => {
                info!("client {} sent event larger ({} bytes) than max size", cid, s);
                nostr_stream.send(NoticeRes("event exceeded max size".to_owned())).await.ok();
            Err(Error::EventMaxLengthError(s)) => {
                info!("client {:?} sent event larger ({} bytes) than max size", cid, s);
                ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "event exceeded max size"))).await.ok();
            },
            Some(Err(e)) => {
                info!("got non-fatal error from client: {}, error: {:?}", cid, e);
            Err(Error::ProtoParseError) => {
                info!("client {:?} sent event that could not be parsed", cid);
                ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "could not parse command"))).await.ok();
            },
            Err(e) => {
                info!("got non-fatal error from client: {:?}, error: {:?}", cid, e);
            },
        }
    },
@@ -365,7 +563,7 @@ async fn nostr_server(
        stop_tx.send(()).ok();
    }
    info!(
        "stopping connection for client: {} (client sent {} event(s), received {})",
        "stopping connection for client: {:?} (client sent {} event(s), received {})",
        cid, client_published_event_count, client_received_event_count
    );
}
818 src/nip05.rs (new file)
@@ -0,0 +1,818 @@
//! User verification using NIP-05 names
//!
//! NIP-05 defines a mechanism for authors to associate an internet
//! address with their public key, in metadata events. This module
//! consumes a stream of metadata events, and keeps a database table
//! updated with the current NIP-05 verification status.
use crate::config::SETTINGS;
use crate::db;
use crate::error::{Error, Result};
use crate::event::Event;
use crate::utils::unix_time;
use hyper::body::HttpBody;
use hyper::client::connect::HttpConnector;
use hyper::Client;
use hyper_tls::HttpsConnector;
use log::*;
use rand::Rng;
use rusqlite::params;
use std::time::Duration;
use std::time::Instant;
use std::time::SystemTime;
use tokio::time::Interval;

/// NIP-05 verifier state
pub struct Verifier {
    /// Metadata events for us to inspect
    metadata_rx: tokio::sync::broadcast::Receiver<Event>,
    /// Newly validated events get written and then broadcast on this channel to subscribers
    event_tx: tokio::sync::broadcast::Sender<Event>,
    /// SQLite read query pool
    read_pool: db::SqlitePool,
    /// SQLite write query pool
    write_pool: db::SqlitePool,
    /// HTTP client
    client: hyper::Client<HttpsConnector<HttpConnector>, hyper::Body>,
    /// After all accounts are updated, wait this long before checking again.
    wait_after_finish: Duration,
    /// Minimum amount of time between HTTP queries
    http_wait_duration: Duration,
    /// Interval for updating verification records
    reverify_interval: Interval,
}

/// A NIP-05 identifier is a local part and domain.
#[derive(PartialEq, Debug, Clone)]
pub struct Nip05Name {
    local: String,
    domain: String,
}

impl Nip05Name {
    /// Does this name represent the entire domain?
    pub fn is_domain_only(&self) -> bool {
        self.local == "_"
    }

    /// Determine the URL to query for verification
    fn to_url(&self) -> Option<http::Uri> {
        format!(
            "https://{}/.well-known/nostr.json?name={}",
            self.domain, self.local
        )
        .parse::<http::Uri>()
        .ok()
    }
}

// Parsing Nip05Names from strings
impl std::convert::TryFrom<&str> for Nip05Name {
    type Error = Error;
    fn try_from(inet: &str) -> Result<Self, Self::Error> {
        // break full name at the @ boundary.
        let components: Vec<&str> = inet.split('@').collect();
        if components.len() != 2 {
            Err(Error::CustomError("too many/few components".to_owned()))
        } else {
            // check if local name is valid
            let local = components[0];
            let domain = components[1];
            if local
                .chars()
                .all(|x| x.is_alphanumeric() || x == '_' || x == '-' || x == '.')
            {
                if domain
                    .chars()
                    .all(|x| x.is_alphanumeric() || x == '-' || x == '.')
                {
                    Ok(Nip05Name {
                        local: local.to_owned(),
                        domain: domain.to_owned(),
                    })
                } else {
                    Err(Error::CustomError(
                        "invalid character in domain part".to_owned(),
                    ))
                }
            } else {
                Err(Error::CustomError(
                    "invalid character in local part".to_owned(),
                ))
            }
        }
    }
}

impl std::fmt::Display for Nip05Name {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}@{}", self.local, self.domain)
    }
}
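Parsing enforces a conservative character set on each side of the @; once parsed, the (private) to_url helper pins the verification document to https://<domain>/.well-known/nostr.json?name=<local>. For example:

```rust
use std::convert::TryFrom;
use nostr_rs_relay::nip05::Nip05Name;

fn main() {
    // Valid: alphanumerics plus ._- in the local part, alphanumerics plus .- in the domain.
    let name = Nip05Name::try_from("bob@example.com").expect("valid NIP-05 name");
    assert!(!name.is_domain_only()); // "_@example.com" would represent the whole domain
    // Verification will be fetched from:
    //   https://example.com/.well-known/nostr.json?name=bob
    println!("{}", name); // Display prints "bob@example.com"
}
```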
// Current time, with a slight forward jitter in seconds
fn now_jitter(sec: u64) -> u64 {
    // random time between now, and 10min in future.
    let mut rng = rand::thread_rng();
    let jitter_amount = rng.gen_range(0..sec);
    let now = unix_time();
    now.saturating_add(jitter_amount)
}

/// Check if the specified username and address are present and match in this response body
fn body_contains_user(username: &str, address: &str, bytes: hyper::body::Bytes) -> Result<bool> {
    // convert the body into json
    let body: serde_json::Value = serde_json::from_slice(&bytes)?;
    // ensure we have a names object.
    let names_map = body
        .as_object()
        .and_then(|x| x.get("names"))
        .and_then(|x| x.as_object())
        .ok_or_else(|| Error::CustomError("not a map".to_owned()))?;
    // get the pubkey for the requested user
    let check_name = names_map.get(username).and_then(|x| x.as_str());
    // ensure the address is a match
    Ok(check_name.map(|x| x == address).unwrap_or(false))
}
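The verification document is just a "names" map from local parts to hex pubkeys, and body_contains_user succeeds only when the requested name maps to exactly the author's pubkey. A self-contained miniature of that check (the sample pubkey value is illustrative):

```rust
// Miniature of body_contains_user: look up `username` under "names" and
// compare the listed pubkey with `address`; any missing piece means false.
fn names_doc_matches(doc: &str, username: &str, address: &str) -> bool {
    serde_json::from_str::<serde_json::Value>(doc)
        .ok()
        .and_then(|v| {
            let pk = v.get("names")?.get(username)?.as_str()?;
            Some(pk == address)
        })
        .unwrap_or(false)
}

fn main() {
    let doc = r#"{"names":{"bob":"deadbeefdeadbeef"}}"#;
    assert!(names_doc_matches(doc, "bob", "deadbeefdeadbeef"));
    assert!(!names_doc_matches(doc, "alice", "deadbeefdeadbeef"));
}
```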
impl Verifier {
|
||||
pub fn new(
|
||||
metadata_rx: tokio::sync::broadcast::Receiver<Event>,
|
||||
event_tx: tokio::sync::broadcast::Sender<Event>,
|
||||
) -> Result<Self> {
|
||||
info!("creating NIP-05 verifier");
|
||||
// build a database connection for reading and writing.
|
||||
let write_pool = db::build_pool(
|
||||
"nip05 writer",
|
||||
rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE,
|
||||
1, // min conns
|
||||
4, // max conns
|
||||
true, // wait for DB
|
||||
);
|
||||
let read_pool = db::build_pool(
|
||||
"nip05 reader",
|
||||
rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY,
|
||||
1, // min conns
|
||||
8, // max conns
|
||||
true, // wait for DB
|
||||
);
|
||||
// setup hyper client
|
||||
let https = HttpsConnector::new();
|
||||
let client = Client::builder().build::<_, hyper::Body>(https);
|
||||
|
||||
// After all accounts have been re-verified, don't check again
|
||||
// for this long.
|
||||
let wait_after_finish = Duration::from_secs(60 * 10);
|
||||
// when we have an active queue of accounts to validate, we
|
||||
// will wait this duration between HTTP requests.
|
||||
let http_wait_duration = Duration::from_secs(1);
|
||||
// setup initial interval for re-verification. If we find
|
||||
// there is no work to be done, it will be reset to a longer
|
||||
// duration.
|
||||
let reverify_interval = tokio::time::interval(http_wait_duration);
|
||||
Ok(Verifier {
|
||||
metadata_rx,
|
||||
event_tx,
|
||||
read_pool,
|
||||
write_pool,
|
||||
client,
|
||||
wait_after_finish,
|
||||
http_wait_duration,
|
||||
reverify_interval,
|
||||
})
|
||||
}
|
||||
|
||||
/// Perform web verification against a NIP-05 name and address.
|
||||
pub async fn get_web_verification(
|
||||
&mut self,
|
||||
nip: &Nip05Name,
|
||||
pubkey: &str,
|
||||
) -> UserWebVerificationStatus {
|
||||
self.get_web_verification_res(nip, pubkey)
|
||||
.await
|
||||
.unwrap_or(UserWebVerificationStatus::Unknown)
|
||||
}
|
||||
|
||||
/// Perform web verification against an `Event` (must be metadata).
|
||||
pub async fn get_web_verification_from_event(
|
||||
&mut self,
|
||||
e: &Event,
|
||||
) -> UserWebVerificationStatus {
|
||||
let nip_parse = e.get_nip05_addr();
|
||||
if let Some(nip) = nip_parse {
|
||||
self.get_web_verification_res(&nip, &e.pubkey)
|
||||
.await
|
||||
.unwrap_or(UserWebVerificationStatus::Unknown)
|
||||
} else {
|
||||
UserWebVerificationStatus::Unknown
|
||||
}
|
||||
}
|
||||
|
||||
/// Perform web verification, with a `Result` return.
|
||||
async fn get_web_verification_res(
|
||||
&mut self,
|
||||
nip: &Nip05Name,
|
||||
pubkey: &str,
|
||||
) -> Result<UserWebVerificationStatus> {
|
||||
// determine if this domain should be checked
|
||||
if !is_domain_allowed(&nip.domain) {
|
||||
return Ok(UserWebVerificationStatus::DomainNotAllowed);
|
||||
}
|
||||
let url = nip
|
||||
.to_url()
|
||||
.ok_or_else(|| Error::CustomError("invalid NIP-05 URL".to_owned()))?;
|
||||
let req = hyper::Request::builder()
|
||||
.method(hyper::Method::GET)
|
||||
.uri(url)
|
||||
.header("Accept", "application/json")
|
||||
.header(
|
||||
"User-Agent",
|
||||
format!(
|
||||
"nostr-rs-relay/{} NIP-05 Verifier",
|
||||
crate::info::CARGO_PKG_VERSION.unwrap()
|
||||
),
|
||||
)
|
||||
.body(hyper::Body::empty())
|
||||
.expect("request builder");
|
||||
|
||||
let response_fut = self.client.request(req);
|
||||
|
||||
// HTTP request with timeout
|
||||
match tokio::time::timeout(Duration::from_secs(5), response_fut).await {
|
||||
Ok(response_res) => {
|
||||
let response = response_res?;
|
||||
// limit size of verification document to 1MB.
|
||||
const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024 * 1024;
|
||||
// determine content length from response
|
||||
let response_content_length = match response.body().size_hint().upper() {
|
||||
Some(v) => v,
|
||||
None => MAX_ALLOWED_RESPONSE_SIZE + 1, // reject missing content length
|
||||
};
|
||||
// TODO: test how hyper handles the client providing an inaccurate content-length.
|
||||
if response_content_length <= MAX_ALLOWED_RESPONSE_SIZE {
|
||||
let (parts, body) = response.into_parts();
|
||||
// TODO: consider redirects
|
||||
if parts.status == http::StatusCode::OK {
|
||||
// parse body, determine if the username / key / address is present
|
||||
let body_bytes = hyper::body::to_bytes(body).await?;
|
||||
let body_matches = body_contains_user(&nip.local, pubkey, body_bytes)?;
|
||||
if body_matches {
|
||||
return Ok(UserWebVerificationStatus::Verified);
|
||||
} else {
|
||||
// successful response, parsed as a nip-05
|
||||
// document, but this name/pubkey was not
|
||||
// present.
|
||||
return Ok(UserWebVerificationStatus::Unverified);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
info!(
|
||||
"content length missing or exceeded limits for account: {:?}",
|
||||
nip.to_string()
|
||||
);
|
||||
}
|
||||
}
|
||||
Err(_) => {
|
||||
info!("timeout verifying account {:?}", nip);
|
||||
return Ok(UserWebVerificationStatus::Unknown);
|
||||
}
|
||||
}
|
||||
Ok(UserWebVerificationStatus::Unknown)
|
||||
}
|
||||
|
||||
/// Perform NIP-05 verifier tasks.
|
||||
pub async fn run(&mut self) {
|
||||
// use this to schedule periodic re-validation tasks
|
||||
// run a loop, restarting on failure
|
||||
loop {
|
||||
let res = self.run_internal().await;
|
||||
if let Err(e) = res {
|
||||
info!("error in verifier: {:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Internal select loop for performing verification
|
||||
async fn run_internal(&mut self) -> Result<()> {
|
||||
tokio::select! {
|
||||
m = self.metadata_rx.recv() => {
|
||||
match m {
|
||||
Ok(e) => {
|
||||
if let Some(naddr) = e.get_nip05_addr() {
|
||||
info!("got metadata event for ({:?},{:?})", naddr.to_string() ,e.get_author_prefix());
|
||||
// Process a new author, checking if they are verified:
|
||||
let check_verified = get_latest_user_verification(self.read_pool.get().expect("could not get connection"), &e.pubkey).await;
|
||||
// ensure the event we got is more recent than the one we have, otherwise we can ignore it.
|
||||
if let Ok(last_check) = check_verified {
|
||||
if e.created_at <= last_check.event_created {
|
||||
// this metadata is from the same author as an existing verification.
|
||||
// it is older than what we have, so we can ignore it.
|
||||
debug!("received older metadata event for author {:?}", e.get_author_prefix());
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
// old, or no existing record for this user. In either case, we just create a new one.
|
||||
let start = Instant::now();
|
||||
let v = self.get_web_verification_from_event(&e).await;
|
||||
info!(
|
||||
"checked name {:?}, result: {:?}, in: {:?}",
|
||||
naddr.to_string(),
|
||||
v,
|
||||
start.elapsed()
|
||||
);
|
||||
// sleep to limit how frequently we make HTTP requests for new metadata events. This should limit us to 4 req/sec.
|
||||
tokio::time::sleep(Duration::from_millis(250)).await;
|
||||
// if this user was verified, we need to write the
|
||||
// record, persist the event, and broadcast.
|
||||
if let UserWebVerificationStatus::Verified = v {
|
||||
self.create_new_verified_user(&naddr.to_string(), &e).await?;
|
||||
}
|
||||
}
|
||||
},
|
||||
Err(tokio::sync::broadcast::error::RecvError::Lagged(c)) => {
|
||||
warn!("incoming metadata events overwhelmed buffer, {} events dropped",c);
|
||||
}
|
||||
Err(tokio::sync::broadcast::error::RecvError::Closed) => {
|
||||
info!("metadata broadcast channel closed");
|
||||
}
|
||||
}
|
||||
},
|
||||
_ = self.reverify_interval.tick() => {
|
||||
// check and see if there is an old account that needs
|
||||
// to be reverified
|
||||
self.do_reverify().await?;
|
||||
},
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Reverify the oldest user verification record.
|
||||
async fn do_reverify(&mut self) -> Result<()> {
|
||||
let reverify_setting;
|
||||
let max_failures;
|
||||
{
|
||||
// this block prevents a read handle to settings being
|
||||
// captured by the async DB call (guard is not Send)
|
||||
let settings = SETTINGS.read().unwrap();
|
||||
reverify_setting = settings.verified_users.verify_update_frequency_duration;
|
||||
max_failures = settings.verified_users.max_consecutive_failures;
|
||||
}
|
||||
// get from settings, but default to 6hrs between re-checking an account
|
||||
let reverify_dur = reverify_setting.unwrap_or_else(|| Duration::from_secs(60 * 60 * 6));
|
||||
// find all verification records that have success or failure OLDER than the reverify_dur.
|
||||
let now = SystemTime::now();
|
||||
let earliest = now - reverify_dur;
|
||||
let earliest_epoch = earliest
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
.map(|x| x.as_secs())
|
||||
.unwrap_or(0);
|
||||
let vr = get_oldest_user_verification(self.read_pool.get()?, earliest_epoch).await;
|
||||
match vr {
|
||||
Ok(ref v) => {
|
||||
let new_status = self.get_web_verification(&v.name, &v.address).await;
|
||||
match new_status {
|
||||
UserWebVerificationStatus::Verified => {
|
||||
// freshly verified account, update the
|
||||
// timestamp.
|
||||
self.update_verification_record(self.write_pool.get()?, v)
|
||||
.await?;
|
||||
}
|
||||
UserWebVerificationStatus::DomainNotAllowed
|
||||
| UserWebVerificationStatus::Unknown => {
|
||||
// server may be offline, or temporarily
|
||||
// blocked by the config file. Note the
|
||||
// failure so we can process something
|
||||
// else.
|
||||
|
||||
// have we had enough failures to give up?
|
||||
                        if v.failure_count >= max_failures as u64 {
                            info!(
                                "giving up on verifying {:?} after {} failures",
                                v.name, v.failure_count
                            );
                            self.delete_verification_record(self.write_pool.get()?, v)
                                .await?;
                        } else {
                            // record normal failure, incrementing failure count
                            self.fail_verification_record(self.write_pool.get()?, v)
                                .await?;
                        }
                    }
                    UserWebVerificationStatus::Unverified => {
                        // domain has removed the verification, drop
                        // the record on our side.
                        self.delete_verification_record(self.write_pool.get()?, v)
                            .await?;
                    }
                }
            }
            Err(Error::SqlError(rusqlite::Error::QueryReturnedNoRows)) => {
                // No users need verification. Reset the interval to
                // the next verification attempt.
                let start = tokio::time::Instant::now() + self.wait_after_finish;
                self.reverify_interval = tokio::time::interval_at(start, self.http_wait_duration);
            }
            Err(ref e) => {
                warn!(
                    "Error when checking for NIP-05 verification records: {:?}",
                    e
                );
            }
        }
        Ok(())
    }

    /// Reset the verification timestamp on a VerificationRecord
    pub async fn update_verification_record(
        &mut self,
        mut conn: db::PooledConnection,
        vr: &VerificationRecord,
    ) -> Result<()> {
        let vr_id = vr.rowid;
        let vr_str = vr.to_string();
        tokio::task::spawn_blocking(move || {
            // add some jitter to the verification to prevent everything from stacking up together.
            let verif_time = now_jitter(600);
            let tx = conn.transaction()?;
            {
                // update verification time and reset any failure count
                let query =
                    "UPDATE user_verification SET verified_at=?, failure_count=0 WHERE id=?";
                let mut stmt = tx.prepare(query)?;
                stmt.execute(params![verif_time, vr_id])?;
            }
            tx.commit()?;
            info!("verification updated for {}", vr_str);
            let ok: Result<()> = Ok(());
            ok
        })
        .await?
    }

    /// Reset the failure timestamp on a VerificationRecord
    pub async fn fail_verification_record(
        &mut self,
        mut conn: db::PooledConnection,
        vr: &VerificationRecord,
    ) -> Result<()> {
        let vr_id = vr.rowid;
        let vr_str = vr.to_string();
        let fail_count = vr.failure_count.saturating_add(1);
        tokio::task::spawn_blocking(move || {
            // add some jitter to the verification to prevent everything from stacking up together.
            let fail_time = now_jitter(600);
            let tx = conn.transaction()?;
            {
                let query = "UPDATE user_verification SET failed_at=?, failure_count=? WHERE id=?";
                let mut stmt = tx.prepare(query)?;
                stmt.execute(params![fail_time, fail_count, vr_id])?;
            }
            tx.commit()?;
            info!("verification failed for {}", vr_str);
            let ok: Result<()> = Ok(());
            ok
        })
        .await?
    }

    /// Delete a VerificationRecord that is no longer valid
    pub async fn delete_verification_record(
        &mut self,
        mut conn: db::PooledConnection,
        vr: &VerificationRecord,
    ) -> Result<()> {
        let vr_id = vr.rowid;
        let vr_str = vr.to_string();
        tokio::task::spawn_blocking(move || {
            let tx = conn.transaction()?;
            {
                let query = "DELETE FROM user_verification WHERE id=?;";
                let mut stmt = tx.prepare(query)?;
                stmt.execute(params![vr_id])?;
            }
            tx.commit()?;
            info!("verification rescinded for {}", vr_str);
            let ok: Result<()> = Ok(());
            ok
        })
        .await?
    }
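
The three record updaters above share one shape worth noting: owned values are moved into a blocking closure so the synchronous rusqlite work stays off the async executor, and the double `?` unwraps first the task's JoinError, then the closure's own Result. A minimal sketch of that shape (hypothetical helper, not relay code):

use tokio::task;

type BoxErr = Box<dyn std::error::Error + Send + Sync>;

// Move owned data in, do blocking work, and surface both failure layers:
// `.await?` handles the task's JoinError, and the closure's Result is
// returned as the function's own value.
async fn blocking_db_work(row_id: u64) -> Result<(), BoxErr> {
    task::spawn_blocking(move || {
        // synchronous database calls would go here
        let _ = row_id;
        Ok::<(), BoxErr>(())
    })
    .await?
}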

    /// Persist an event, create a verification record, and broadcast.
    // TODO: have more event-writing logic handled in the db module.
    // Right now, these events avoid the rate limit. That is
    // acceptable since as soon as the user is registered, this path
    // is no longer used.
    // TODO: refactor these into spawn_blocking
    // calls to get them off the async executors.
    async fn create_new_verified_user(&mut self, name: &str, event: &Event) -> Result<()> {
        let start = Instant::now();
        // we should only do this if we are enabled. if we are
        // disabled/passive, the event has already been persisted.
        let should_write_event;
        {
            let settings = SETTINGS.read().unwrap();
            should_write_event = settings.verified_users.is_enabled();
        }
        if should_write_event {
            match db::write_event(&mut self.write_pool.get()?, event) {
                Ok(updated) => {
                    if updated != 0 {
                        info!(
                            "persisted event: {:?} in {:?}",
                            event.get_event_id_prefix(),
                            start.elapsed()
                        );
                        self.event_tx.send(event.clone()).ok();
                    }
                }
                Err(err) => {
                    warn!("event insert failed: {:?}", err);
                    if let Error::SqlError(r) = err {
                        warn!("because: {:?}", r);
                    }
                }
            }
        }
        // write the verification record
        save_verification_record(self.write_pool.get()?, event, name).await?;
        Ok(())
    }
}

/// Result of checking user's verification status against DNS/HTTP.
#[derive(PartialEq, Debug, Clone)]
pub enum UserWebVerificationStatus {
    Verified,         // user is verified, as of now.
    DomainNotAllowed, // domain blacklist or whitelist denied us from attempting a verification
    Unknown,          // user's status could not be determined (timeout, server error)
    Unverified,       // user's status is not verified (successful check, name/addr do not match)
}

/// A NIP-05 verification record.
///
/// Basic information for a verification event; everything we need to
/// assert that a NIP-05 address is good.
#[derive(PartialEq, Debug, Clone)]
pub struct VerificationRecord {
    pub rowid: u64,                // database row for this verification event
    pub name: Nip05Name,           // address being verified
    pub address: String,           // pubkey
    pub event: String,             // event ID hash providing the verification
    pub event_created: u64,        // when the metadata event was published
    pub last_success: Option<u64>, // the most recent time a verification was provided. None if verification under this name has never succeeded.
    pub last_failure: Option<u64>, // the most recent time verification was attempted, but could not be completed.
    pub failure_count: u64,        // how many consecutive failures have been observed.
}

/// Check with settings to determine if a given domain is allowed to
/// publish.
pub fn is_domain_allowed(domain: &str) -> bool {
    let settings = SETTINGS.read().unwrap();
    // if there is a whitelist, the domain must be present in it.
    if let Some(wl) = &settings.verified_users.domain_whitelist {
        // workaround for Vec::contains not accepting &str
        return wl.iter().any(|x| x == domain);
    }
    // otherwise, check that the domain is not in the blacklist
    if let Some(bl) = &settings.verified_users.domain_blacklist {
        return !bl.iter().any(|x| x == domain);
    }
    true
}

impl VerificationRecord {
    /// Check if the record is recent enough to be considered valid,
    /// and the domain is allowed.
    pub fn is_valid(&self) -> bool {
        let settings = SETTINGS.read().unwrap();
        // how long a verification record is good for
        let nip05_expiration = &settings.verified_users.verify_expiration_duration;
        if let Some(e) = nip05_expiration {
            if !self.is_current(e) {
                return false;
            }
        }
        // check domains
        is_domain_allowed(&self.name.domain)
    }

    /// Check if this record has been validated within the given
    /// duration.
    fn is_current(&self, d: &Duration) -> bool {
        match self.last_success {
            Some(s) => {
                // current time - duration
                let now = SystemTime::now();
                let cutoff = now - *d;
                let cutoff_epoch = cutoff
                    .duration_since(SystemTime::UNIX_EPOCH)
                    .map(|x| x.as_secs())
                    .unwrap_or(0);
                s > cutoff_epoch
            }
            None => false,
        }
    }
}
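
For clarity, the cutoff arithmetic in is_current can be read in isolation: subtract the expiration window from the wall clock, convert to epoch seconds, and require the last success to be newer. A standalone sketch of the same logic, detached from the struct:

use std::time::{Duration, SystemTime};

fn is_current(last_success: Option<u64>, window: &Duration) -> bool {
    match last_success {
        Some(s) => {
            // epoch seconds of (now - window); anything newer is still valid
            let cutoff_epoch = (SystemTime::now() - *window)
                .duration_since(SystemTime::UNIX_EPOCH)
                .map(|x| x.as_secs())
                .unwrap_or(0);
            s > cutoff_epoch
        }
        None => false, // never verified successfully
    }
}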

impl std::fmt::Display for VerificationRecord {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "({:?},{:?})",
            self.name.to_string(),
            self.address.chars().take(8).collect::<String>()
        )
    }
}

/// Create a new verification record based on an event
pub async fn save_verification_record(
    mut conn: db::PooledConnection,
    event: &Event,
    name: &str,
) -> Result<()> {
    let e = hex::decode(&event.id).ok();
    let n = name.to_owned();
    let a_prefix = event.get_author_prefix();
    tokio::task::spawn_blocking(move || {
        let tx = conn.transaction()?;
        {
            // if we create a /new/ one, we should get rid of any old ones.
            // Or group the new ones by name and only consider the latest.
            let query = "INSERT INTO user_verification (metadata_event, name, verified_at) VALUES ((SELECT id from event WHERE event_hash=?), ?, strftime('%s','now'));";
            let mut stmt = tx.prepare(query)?;
            stmt.execute(params![e, n])?;
            // get the row ID
            let v_id = tx.last_insert_rowid();
            // delete everything else by this name
            let del_query = "DELETE FROM user_verification WHERE name = ? AND id != ?;";
            let mut del_stmt = tx.prepare(del_query)?;
            let count = del_stmt.execute(params![n, v_id])?;
            if count > 0 {
                info!(
                    "removed {} old verification records for ({:?},{:?})",
                    count, n, a_prefix
                );
            }
        }
        tx.commit()?;
        info!("saved new verification record for ({:?},{:?})", n, a_prefix);
        let ok: Result<()> = Ok(());
        ok
    })
    .await?
}
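
The transaction above implements a keep-only-the-latest rule: insert the fresh record, capture last_insert_rowid, then delete every other row sharing the same name. Against a toy table the pattern looks like this (sketch, not relay code):

use rusqlite::{params, Connection};

fn main() -> rusqlite::Result<()> {
    let mut conn = Connection::open_in_memory()?;
    conn.execute_batch("CREATE TABLE v (id INTEGER PRIMARY KEY, name TEXT);")?;
    let tx = conn.transaction()?;
    tx.execute("INSERT INTO v (name) VALUES (?);", params!["bob@example.com"])?;
    let new_id = tx.last_insert_rowid();
    // prune any older rows for the same name, keeping only the new one
    tx.execute(
        "DELETE FROM v WHERE name = ? AND id != ?;",
        params!["bob@example.com", new_id],
    )?;
    tx.commit()?;
    Ok(())
}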

/// Retrieve the most recent verification record for a given pubkey (async).
pub async fn get_latest_user_verification(
    conn: db::PooledConnection,
    pubkey: &str,
) -> Result<VerificationRecord> {
    let p = pubkey.to_owned();
    tokio::task::spawn_blocking(move || query_latest_user_verification(conn, p)).await?
}

/// Query database for the latest verification record for a given pubkey.
pub fn query_latest_user_verification(
    mut conn: db::PooledConnection,
    pubkey: String,
) -> Result<VerificationRecord> {
    let tx = conn.transaction()?;
    let query = "SELECT v.id, v.name, e.event_hash, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v LEFT JOIN event e ON e.id=v.metadata_event WHERE e.author=? ORDER BY e.created_at DESC, v.verified_at DESC, v.failed_at DESC LIMIT 1;";
    let mut stmt = tx.prepare_cached(query)?;
    let fields = stmt.query_row(params![hex::decode(&pubkey).ok()], |r| {
        let rowid: u64 = r.get(0)?;
        let rowname: String = r.get(1)?;
        let eventid: Vec<u8> = r.get(2)?;
        let created_at: u64 = r.get(3)?;
        // create a tuple since we can't throw non-rusqlite errors in this closure
        Ok((
            rowid,
            rowname,
            eventid,
            created_at,
            r.get(4).ok(),
            r.get(5).ok(),
            r.get(6)?,
        ))
    })?;
    Ok(VerificationRecord {
        rowid: fields.0,
        name: Nip05Name::try_from(&fields.1[..])?,
        address: pubkey,
        event: hex::encode(fields.2),
        event_created: fields.3,
        last_success: fields.4,
        last_failure: fields.5,
        failure_count: fields.6,
    })
}
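
The closure passed to query_row may only fail with rusqlite errors, so richer fallible parsing (like Nip05Name::try_from above) has to happen outside it; hence the intermediate tuple. A compact illustration of the same two-stage pattern (sketch, toy table and parsing):

use rusqlite::Connection;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch("CREATE TABLE u (name TEXT); INSERT INTO u VALUES ('bob@example.com');")?;
    // stage 1: pull raw fields out; only rusqlite errors can escape the closure
    let raw: String = conn.query_row("SELECT name FROM u;", [], |r| r.get(0))?;
    // stage 2: fallible domain parsing happens outside, with our own error type
    let (local, domain) = raw.split_once('@').ok_or("not a user@domain name")?;
    assert_eq!((local, domain), ("bob", "example.com"));
    Ok(())
}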

/// Retrieve the oldest user verification (async).
pub async fn get_oldest_user_verification(
    conn: db::PooledConnection,
    earliest: u64,
) -> Result<VerificationRecord> {
    tokio::task::spawn_blocking(move || query_oldest_user_verification(conn, earliest)).await?
}

/// Query database for the oldest verification record that has neither
/// succeeded nor failed since the given cutoff.
pub fn query_oldest_user_verification(
    mut conn: db::PooledConnection,
    earliest: u64,
) -> Result<VerificationRecord> {
    let tx = conn.transaction()?;
    let query = "SELECT v.id, v.name, e.event_hash, e.author, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v LEFT JOIN event e ON e.id=v.metadata_event WHERE (v.verified_at < ? OR v.verified_at IS NULL) AND (v.failed_at < ? OR v.failed_at IS NULL) ORDER BY v.verified_at ASC, v.failed_at ASC LIMIT 1;";
    let mut stmt = tx.prepare_cached(query)?;
    let fields = stmt.query_row(params![earliest, earliest], |r| {
        let rowid: u64 = r.get(0)?;
        let rowname: String = r.get(1)?;
        let eventid: Vec<u8> = r.get(2)?;
        let pubkey: Vec<u8> = r.get(3)?;
        let created_at: u64 = r.get(4)?;
        // create a tuple since we can't throw non-rusqlite errors in this closure
        Ok((
            rowid,
            rowname,
            eventid,
            pubkey,
            created_at,
            r.get(5).ok(),
            r.get(6).ok(),
            r.get(7)?,
        ))
    })?;
    let vr = VerificationRecord {
        rowid: fields.0,
        name: Nip05Name::try_from(&fields.1[..])?,
        address: hex::encode(fields.3),
        event: hex::encode(fields.2),
        event_created: fields.4,
        last_success: fields.5,
        last_failure: fields.6,
        failure_count: fields.7,
    };
    Ok(vr)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn local_from_inet() {
        let addr = "bob@example.com";
        let parsed = Nip05Name::try_from(addr);
        assert!(parsed.is_ok());
        let v = parsed.unwrap();
        assert_eq!(v.local, "bob");
        assert_eq!(v.domain, "example.com");
    }

    #[test]
    fn not_enough_sep() {
        let addr = "bob_example.com";
        let parsed = Nip05Name::try_from(addr);
        assert!(parsed.is_err());
    }

    #[test]
    fn too_many_sep() {
        let addr = "foo@bob@example.com";
        let parsed = Nip05Name::try_from(addr);
        assert!(parsed.is_err());
    }

    #[test]
    fn invalid_local_name() {
        // non-permitted ascii chars
        assert!(Nip05Name::try_from("foo!@example.com").is_err());
        assert!(Nip05Name::try_from("foo @example.com").is_err());
        assert!(Nip05Name::try_from(" foo@example.com").is_err());
        assert!(Nip05Name::try_from("f oo@example.com").is_err());
        assert!(Nip05Name::try_from("foo<@example.com").is_err());
        // unicode dash
        assert!(Nip05Name::try_from("foo‐bar@example.com").is_err());
        // emoji
        assert!(Nip05Name::try_from("foo😭bar@example.com").is_err());
    }

    #[test]
    fn invalid_domain_name() {
        // non-permitted ascii chars
        assert!(Nip05Name::try_from("foo@examp!e.com").is_err());
        assert!(Nip05Name::try_from("foo@ example.com").is_err());
        assert!(Nip05Name::try_from("foo@exa mple.com").is_err());
        assert!(Nip05Name::try_from("foo@example .com").is_err());
        assert!(Nip05Name::try_from("foo@exa<mple.com").is_err());
        // unicode dash
        assert!(Nip05Name::try_from("foobar@exa‐mple.com").is_err());
        // emoji
        assert!(Nip05Name::try_from("foobar@ex😭ample.com").is_err());
    }

    #[test]
    fn to_url() {
        let nip = Nip05Name::try_from("foobar@example.com").unwrap();
        assert_eq!(
            nip.to_url(),
            Some(
                "https://example.com/.well-known/nostr.json?name=foobar"
                    .parse()
                    .unwrap()
            )
        );
    }
}

@@ -1,132 +0,0 @@
//! Nostr protocol layered over WebSocket
use crate::close::CloseCmd;
use crate::config;
use crate::error::{Error, Result};
use crate::event::EventCmd;
use crate::subscription::Subscription;
use core::pin::Pin;
use futures::sink::Sink;
use futures::stream::Stream;
use futures::task::Context;
use futures::task::Poll;
use hyper::upgrade::Upgraded;
use log::*;
use serde::{Deserialize, Serialize};
use tokio_tungstenite::WebSocketStream;
use tungstenite::error::Error as WsError;
use tungstenite::protocol::Message;

/// Nostr protocol messages from a client
#[derive(Deserialize, Serialize, Clone, PartialEq, Debug)]
#[serde(untagged)]
pub enum NostrMessage {
    /// An `EVENT` message
    EventMsg(EventCmd),
    /// A `REQ` message
    SubMsg(Subscription),
    /// A `CLOSE` message
    CloseMsg(CloseCmd),
}

/// Nostr protocol messages from a relay/server
#[derive(Deserialize, Serialize, Clone, PartialEq, Debug)]
pub enum NostrResponse {
    /// A `NOTICE` response
    NoticeRes(String),
    /// An `EVENT` response, composed of the subscription identifier,
    /// and serialized event JSON
    EventRes(String, String),
}

/// A Nostr protocol stream is layered on top of a WebSocket stream.
pub struct NostrStream {
    ws_stream: WebSocketStream<Upgraded>,
}

/// Given a websocket, return a protocol stream wrapper.
pub fn wrap_ws_in_nostr(ws: WebSocketStream<Upgraded>) -> NostrStream {
    NostrStream { ws_stream: ws }
}

/// Implement the [`Stream`] interface to produce Nostr messages.
impl Stream for NostrStream {
    type Item = Result<NostrMessage>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        /// Convert a Message to a NostrMessage
        fn convert(msg: String) -> Result<NostrMessage> {
            // get the configuration
            let config = config::SETTINGS.read().unwrap();
            let parsed_res: Result<NostrMessage> = serde_json::from_str(&msg).map_err(|e| e.into());
            match parsed_res {
                Ok(m) => {
                    if let NostrMessage::EventMsg(_) = m {
                        if let Some(max_size) = config.limits.max_event_bytes {
                            // check length, ensure that some max size is set.
                            if msg.len() > max_size && max_size > 0 {
                                return Err(Error::EventMaxLengthError(msg.len()));
                            }
                        }
                    }
                    Ok(m)
                }
                Err(e) => {
                    debug!("proto parse error: {:?}", e);
                    Err(Error::ProtoParseError)
                }
            }
        }
        match Pin::new(&mut self.ws_stream).poll_next(cx) {
            Poll::Pending => Poll::Pending,
            Poll::Ready(None) => Poll::Ready(None),
            Poll::Ready(Some(v)) => match v {
                Ok(Message::Text(vs)) => Poll::Ready(Some(convert(vs))),
                Ok(Message::Binary(_)) => Poll::Ready(Some(Err(Error::ProtoParseError))),
                Ok(Message::Pong(_)) | Ok(Message::Ping(_)) => Poll::Pending,
                Ok(Message::Close(_)) => Poll::Ready(None),
                Err(WsError::AlreadyClosed) | Err(WsError::ConnectionClosed) => Poll::Ready(None),
                Err(_) => Poll::Ready(Some(Err(Error::ConnError))),
            },
        }
    }
}
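
NostrMessage relies on #[serde(untagged)]: serde tries each variant in order and keeps the first whose shape fits the incoming JSON array. A self-contained sketch with simplified stand-in types (the real EventCmd, Subscription, and CloseCmd live elsewhere in the crate and do additional validation):

use serde::Deserialize;

#[derive(Deserialize, Debug, PartialEq)]
struct CloseCmdLite(String, String); // e.g. ["CLOSE", "<subscription id>"]

#[derive(Deserialize, Debug, PartialEq)]
#[serde(untagged)]
enum MsgLite {
    // with untagged, any two-element string array matches this variant
    Close(CloseCmdLite),
}

fn main() {
    let m: MsgLite = serde_json::from_str(r#"["CLOSE","sub-1"]"#).unwrap();
    assert_eq!(m, MsgLite::Close(CloseCmdLite("CLOSE".into(), "sub-1".into())));
}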

/// Implement the [`Sink`] interface to produce Nostr responses.
impl Sink<NostrResponse> for NostrStream {
    type Error = Error;

    fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // map the error type
        match Pin::new(&mut self.ws_stream).poll_ready(cx) {
            Poll::Ready(Ok(())) => Poll::Ready(Ok(())),
            Poll::Ready(Err(_)) => Poll::Ready(Err(Error::ConnWriteError)),
            Poll::Pending => Poll::Pending,
        }
    }

    fn start_send(mut self: Pin<&mut Self>, item: NostrResponse) -> Result<(), Self::Error> {
        // TODO: do real escaping for these - at least on NOTICE,
        // which surely has some problems if arbitrary text is sent.
        let send_str = match item {
            NostrResponse::NoticeRes(msg) => {
                let s = msg.replace("\"", "");
                format!("[\"NOTICE\",\"{}\"]", s)
            }
            NostrResponse::EventRes(sub, eventstr) => {
                let subesc = sub.replace("\"", "");
                format!("[\"EVENT\",\"{}\",{}]", subesc, eventstr)
            }
        };
        match Pin::new(&mut self.ws_stream).start_send(Message::Text(send_str)) {
            Ok(()) => Ok(()),
            Err(_) => Err(Error::ConnWriteError),
        }
    }

    fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }
}
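
The TODO above about escaping can be addressed by letting serde_json build the frame, which handles quotes, backslashes, and control characters correctly; the hand-rolled quote stripping then becomes unnecessary. One possible shape (a sketch, not the current relay code):

use serde_json::json;

fn notice_frame(msg: &str) -> String {
    // serde_json escapes the payload correctly, whatever it contains
    json!(["NOTICE", msg]).to_string()
}

fn main() {
    assert_eq!(
        notice_frame(r#"say "hi""#),
        r#"["NOTICE","say \"hi\""]"#
    );
}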

src/schema.rs (new file, 237 lines)
@@ -0,0 +1,237 @@
//! Database schema and migrations
use crate::db::PooledConnection;
use crate::error::Result;
use crate::utils::is_hex;
use log::*;
use rusqlite::limits::Limit;
use rusqlite::params;
use rusqlite::Connection;

// TODO: drop the pubkey_ref and event_ref tables

/// Startup DB Pragmas
pub const STARTUP_SQL: &str = r##"
PRAGMA main.synchronous=NORMAL;
PRAGMA foreign_keys = ON;
pragma mmap_size = 536870912; -- 512MB of mmap
"##;

/// Schema definition
const INIT_SQL: &str = r##"
-- Database settings
PRAGMA encoding = "UTF-8";
PRAGMA journal_mode=WAL;
PRAGMA main.synchronous=NORMAL;
PRAGMA foreign_keys = ON;
PRAGMA application_id = 1654008667;
PRAGMA user_version = 5;

-- Event Table
CREATE TABLE IF NOT EXISTS event (
id INTEGER PRIMARY KEY,
event_hash BLOB NOT NULL, -- 32-byte SHA256 hash of the event
first_seen INTEGER NOT NULL, -- when the event was first seen (not authored!) (seconds since 1970)
created_at INTEGER NOT NULL, -- when the event was authored
author BLOB NOT NULL, -- author pubkey
kind INTEGER NOT NULL, -- event kind
hidden INTEGER, -- relevant for queries
content TEXT NOT NULL -- serialized json of event object
);

-- Event Indexes
CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash);
CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS author_index ON event(author);
CREATE INDEX IF NOT EXISTS kind_index ON event(kind);

-- Tag Table
-- Tag values are stored as either a BLOB (if they come in as a
-- hex-string), or TEXT otherwise.
-- This means that searches need to select the appropriate column.
CREATE TABLE IF NOT EXISTS tag (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
name TEXT, -- the tag name ("p", "e", whatever)
value TEXT, -- the tag value, if not hex.
value_hex BLOB, -- the tag value, if it can be interpreted as a hex string.
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
CREATE INDEX IF NOT EXISTS tag_val_hex_index ON tag(value_hex);

-- NIP-05 User Validation
CREATE TABLE IF NOT EXISTS user_verification (
id INTEGER PRIMARY KEY,
metadata_event INTEGER NOT NULL, -- the metadata event used for this validation.
name TEXT NOT NULL, -- the nip05 field value (user@domain).
verified_at INTEGER, -- timestamp this author/nip05 was most recently verified.
failed_at INTEGER, -- timestamp a verification attempt failed (host down).
failure_count INTEGER DEFAULT 0, -- number of consecutive failures.
FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name);
CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);
"##;

/// Determine the current application database schema version.
pub fn db_version(conn: &mut Connection) -> Result<usize> {
    let query = "PRAGMA user_version;";
    let curr_version = conn.query_row(query, [], |row| row.get(0))?;
    Ok(curr_version)
}
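
PRAGMA user_version is ordinary SQLite storage: it defaults to 0 on a fresh database and persists whatever integer is written, which is what makes it suitable as a schema-version marker. A quick standalone check (sketch):

use rusqlite::Connection;

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    let v: i64 = conn.query_row("PRAGMA user_version;", [], |r| r.get(0))?;
    assert_eq!(v, 0); // fresh databases report version 0
    conn.execute_batch("PRAGMA user_version = 5;")?;
    let v: i64 = conn.query_row("PRAGMA user_version;", [], |r| r.get(0))?;
    assert_eq!(v, 5); // the marker persists for later upgrade checks
    Ok(())
}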

/// Upgrade DB to latest version, and execute pragma settings
pub fn upgrade_db(conn: &mut PooledConnection) -> Result<()> {
    // check the version.
    let mut curr_version = db_version(conn)?;
    info!("DB version = {:?}", curr_version);

    debug!(
        "SQLite max query parameters: {}",
        conn.limit(Limit::SQLITE_LIMIT_VARIABLE_NUMBER)
    );
    debug!(
        "SQLite max table/blob/text length: {} MB",
        (conn.limit(Limit::SQLITE_LIMIT_LENGTH) as f64 / (1024 * 1024) as f64).floor()
    );
    debug!(
        "SQLite max SQL length: {} MB",
        (conn.limit(Limit::SQLITE_LIMIT_SQL_LENGTH) as f64 / (1024 * 1024) as f64).floor()
    );

    // initialize from scratch
    if curr_version == 0 {
        match conn.execute_batch(INIT_SQL) {
            Ok(()) => {
                info!("database pragma/schema initialized to v5, and ready");
            }
            Err(err) => {
                error!("update failed: {}", err);
                panic!("database could not be initialized");
            }
        }
    }
    if curr_version == 1 {
        // only change is adding a hidden column to events.
        let upgrade_sql = r##"
ALTER TABLE event ADD hidden INTEGER;
UPDATE event SET hidden=FALSE;
PRAGMA user_version = 2;
"##;
        match conn.execute_batch(upgrade_sql) {
            Ok(()) => {
                info!("database schema upgraded v1 -> v2");
                curr_version = 2;
            }
            Err(err) => {
                error!("update failed: {}", err);
                panic!("database could not be upgraded");
            }
        }
    }
    if curr_version == 2 {
        // this version lacks the tag column
        info!("database schema needs update from 2->3");
        let upgrade_sql = r##"
CREATE TABLE IF NOT EXISTS tag (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
name TEXT, -- the tag name ("p", "e", whatever)
value TEXT, -- the tag value, if not hex.
value_hex BLOB, -- the tag value, if it can be interpreted as a hex string.
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
PRAGMA user_version = 3;
"##;
        // TODO: load existing refs into tag table
        match conn.execute_batch(upgrade_sql) {
            Ok(()) => {
                info!("database schema upgraded v2 -> v3");
                curr_version = 3;
            }
            Err(err) => {
                error!("update failed: {}", err);
                panic!("database could not be upgraded");
            }
        }
        info!("Starting transaction");
        // iterate over every event/pubkey tag
        let tx = conn.transaction()?;
        {
            let mut stmt = tx.prepare("select event_id, \"e\", lower(hex(referenced_event)) from event_ref union select event_id, \"p\", lower(hex(referenced_pubkey)) from pubkey_ref;")?;
            let mut tag_rows = stmt.query([])?;
            while let Some(row) = tag_rows.next()? {
                // we want to capture the event_id that had the tag, the tag name, and the tag hex value.
                let event_id: u64 = row.get(0)?;
                let tag_name: String = row.get(1)?;
                let tag_value: String = row.get(2)?;
                // this will leave behind p/e tags that were non-hex, but they are invalid anyways.
                if is_hex(&tag_value) {
                    tx.execute(
                        "INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
                        params![event_id, tag_name, hex::decode(&tag_value).ok()],
                    )?;
                }
            }
        }
        tx.commit()?;
        info!("Upgrade complete");
    }
    if curr_version == 3 {
        info!("database schema needs update from 3->4");
        let upgrade_sql = r##"
-- incoming metadata events with nip05
CREATE TABLE IF NOT EXISTS user_verification (
id INTEGER PRIMARY KEY,
metadata_event INTEGER NOT NULL, -- the metadata event used for this validation.
name TEXT NOT NULL, -- the nip05 field value (user@domain).
verified_at INTEGER, -- timestamp this author/nip05 was most recently verified.
failed_at INTEGER, -- timestamp a verification attempt failed (host down).
failure_count INTEGER DEFAULT 0, -- number of consecutive failures.
FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name);
CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);
PRAGMA user_version = 4;
"##;
        match conn.execute_batch(upgrade_sql) {
            Ok(()) => {
                info!("database schema upgraded v3 -> v4");
                curr_version = 4;
            }
            Err(err) => {
                error!("update failed: {}", err);
                panic!("database could not be upgraded");
            }
        }
    }

    if curr_version == 4 {
        info!("database schema needs update from 4->5");
        let upgrade_sql = r##"
DROP TABLE IF EXISTS event_ref;
DROP TABLE IF EXISTS pubkey_ref;
PRAGMA user_version=5;
"##;
        match conn.execute_batch(upgrade_sql) {
            Ok(()) => {
                info!("database schema upgraded v4 -> v5");
                // uncomment if we have a newer version
                //curr_version = 5;
            }
            Err(err) => {
                error!("update failed: {}", err);
                panic!("database could not be upgraded");
            }
        }
    } else if curr_version == 5 {
        debug!("Database version was already current");
    } else if curr_version > 5 {
        panic!("Database version is newer than supported by this executable");
    }

    // Setup PRAGMA
    conn.execute_batch(STARTUP_SQL)?;
    debug!("SQLite PRAGMA startup completed");
    Ok(())
}
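
The if-chain above applies migrations sequentially, with each SQL batch ending in a user_version bump so that re-runs are harmless. The same idea in table-driven form, against a toy schema (a sketch, not the relay's code):

use rusqlite::Connection;

const MIGRATIONS: &[(i64, &str)] = &[
    (1, "CREATE TABLE t (id INTEGER PRIMARY KEY); PRAGMA user_version = 1;"),
    (2, "ALTER TABLE t ADD extra INTEGER; PRAGMA user_version = 2;"),
];

fn migrate(conn: &Connection) -> rusqlite::Result<()> {
    let mut v: i64 = conn.query_row("PRAGMA user_version;", [], |r| r.get(0))?;
    for (target, sql) in MIGRATIONS {
        if v < *target {
            conn.execute_batch(sql)?; // each batch bumps user_version itself
            v = *target;
        }
    }
    Ok(())
}

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    migrate(&conn)?;
    migrate(&conn)?; // second run is a no-op: version already current
    Ok(())
}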

@@ -1,7 +1,11 @@
 //! Subscription and filter parsing
 use crate::error::Result;
 use crate::event::Event;
+use serde::de::Unexpected;
 use serde::{Deserialize, Deserializer, Serialize};
+use serde_json::Value;
+use std::collections::HashMap;
+use std::collections::HashSet;
 
 /// Subscription identifier and set of request filters
 #[derive(Serialize, PartialEq, Debug, Clone)]
@@ -15,24 +19,76 @@ pub struct Subscription {
 /// Corresponds to client-provided subscription request elements. Any
 /// element can be present if it should be used in filtering, or
 /// absent ([`None`]) if it should be ignored.
-#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
+#[derive(Serialize, PartialEq, Debug, Clone)]
 pub struct ReqFilter {
-    /// Event hash
-    pub id: Option<String>,
-    /// Event kind
-    pub kind: Option<u64>,
-    /// Referenced event hash
-    #[serde(rename = "#e")]
-    pub event: Option<String>,
-    /// Referenced public key for a petname
-    #[serde(rename = "#p")]
-    pub pubkey: Option<String>,
+    /// Event hashes
+    pub ids: Option<Vec<String>>,
+    /// Event kinds
+    pub kinds: Option<Vec<u64>>,
     /// Events published after this time
     pub since: Option<u64>,
+    /// Events published before this time
+    pub until: Option<u64>,
     /// List of author public keys
     pub authors: Option<Vec<String>>,
+    /// Set of tags
+    #[serde(skip)]
+    pub tags: Option<HashMap<String, HashSet<String>>>,
 }
+
+impl<'de> Deserialize<'de> for ReqFilter {
+    fn deserialize<D>(deserializer: D) -> Result<ReqFilter, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let received: Value = Deserialize::deserialize(deserializer)?;
+        let filter = received.as_object().ok_or_else(|| {
+            serde::de::Error::invalid_type(
+                Unexpected::Other("reqfilter is not an object"),
+                &"a json object",
+            )
+        })?;
+        let mut rf = ReqFilter {
+            ids: None,
+            kinds: None,
+            since: None,
+            until: None,
+            authors: None,
+            tags: None,
+        };
+        let mut ts = None;
+        // iterate through each key, and assign values that exist
+        for (key, val) in filter.into_iter() {
+            // ids
+            if key == "ids" {
+                rf.ids = Deserialize::deserialize(val).ok();
+            } else if key == "kinds" {
+                rf.kinds = Deserialize::deserialize(val).ok();
+            } else if key == "since" {
+                rf.since = Deserialize::deserialize(val).ok();
+            } else if key == "until" {
+                rf.until = Deserialize::deserialize(val).ok();
+            } else if key == "authors" {
+                rf.authors = Deserialize::deserialize(val).ok();
+            } else if key.starts_with('#') && key.len() > 1 && val.is_array() {
+                // remove the prefix
+                let tagname = &key[1..];
+                if ts.is_none() {
+                    // Initialize the tag map if necessary
+                    ts = Some(HashMap::new());
+                }
+                if let Some(m) = ts.as_mut() {
+                    let tag_vals: Option<Vec<String>> = Deserialize::deserialize(val).ok();
+                    if let Some(v) = tag_vals {
+                        let hs = HashSet::from_iter(v.into_iter());
+                        m.insert(tagname.to_owned(), hs);
+                    }
+                };
+            }
+        }
+        rf.tags = ts;
+        Ok(rf)
+    }
+}
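
The heart of the deserializer above is its treatment of unknown keys: anything of the form "#x" with an array value becomes a tag filter. That piece in isolation, over a raw serde_json::Value (a sketch with a hypothetical helper, not the crate's code):

use serde_json::Value;
use std::collections::{HashMap, HashSet};

fn tag_filters(filter: &Value) -> HashMap<String, HashSet<String>> {
    let mut tags = HashMap::new();
    if let Some(obj) = filter.as_object() {
        for (key, val) in obj {
            // "#e" becomes tag name "e"; non-array values are ignored
            if key.starts_with('#') && key.len() > 1 && val.is_array() {
                if let Ok(vals) = serde_json::from_value::<Vec<String>>(val.clone()) {
                    tags.insert(key[1..].to_owned(), vals.into_iter().collect());
                }
            }
        }
    }
    tags
}

fn main() {
    let f: Value = serde_json::from_str(r#"{"kinds":[1],"#e":["abc","def"]}"#).unwrap();
    assert!(tag_filters(&f)["e"].contains("abc"));
}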

 impl<'de> Deserialize<'de> for Subscription {
@@ -42,7 +98,7 @@ impl<'de> Deserialize<'de> for Subscription {
     where
         D: Deserializer<'de>,
     {
-        let mut v: serde_json::Value = Deserialize::deserialize(deserializer)?;
+        let mut v: Value = Deserialize::deserialize(deserializer)?;
         // this should be a 3-or-more element array.
         // verify the first element is a String, REQ
         // get the subscription from the second element.
@@ -77,6 +133,7 @@ impl<'de> Deserialize<'de> for Subscription {
         for fv in i {
             let f: ReqFilter = serde_json::from_value(fv.take())
                 .map_err(|_| serde::de::Error::custom("could not parse filter"))?;
+            // create indexes
             filters.push(f);
         }
         Ok(Subscription {
@@ -103,46 +160,63 @@ impl Subscription {
     }
 }
 
-impl ReqFilter {
-    /// Check for a match within the authors list.
-    // TODO: Ambiguity; what if the array is empty? Should we
-    // consider that the same as null?
-    fn authors_match(&self, event: &Event) -> bool {
-        self.authors
-            .as_ref()
-            .map(|vs| vs.contains(&event.pubkey.to_owned()))
-            .unwrap_or(true)
+fn prefix_match(prefixes: &[String], target: &str) -> bool {
+    for prefix in prefixes {
+        if target.starts_with(prefix) {
+            return true;
+        }
     }
-    /// Check if this filter either matches, or does not care about the event tags.
-    fn event_match(&self, event: &Event) -> bool {
-        self.event
-            .as_ref()
-            .map(|t| event.event_tag_match(t))
-            .unwrap_or(true)
-    }
+    // none matched
+    false
+}
 
-    /// Check if this filter either matches, or does not care about
-    /// the pubkey/petname tags.
-    fn pubkey_match(&self, event: &Event) -> bool {
-        self.pubkey
+impl ReqFilter {
+    fn ids_match(&self, event: &Event) -> bool {
+        self.ids
             .as_ref()
-            .map(|t| event.pubkey_tag_match(t))
+            .map(|vs| prefix_match(vs, &event.id))
             .unwrap_or(true)
     }
+
+    fn authors_match(&self, event: &Event) -> bool {
+        self.authors
+            .as_ref()
+            .map(|vs| prefix_match(vs, &event.pubkey))
+            .unwrap_or(true)
+    }
+
+    fn tag_match(&self, event: &Event) -> bool {
+        // get the hashset from the filter.
+        if let Some(map) = &self.tags {
+            for (key, val) in map.iter() {
+                let tag_match = event.generic_tag_val_intersect(key, val);
+                // if there is no match for this tag, the match fails.
+                if !tag_match {
+                    return false;
+                }
+                // if there was a match, we move on to the next one.
+            }
+        }
+        // if the tag map is empty, the match succeeds (there was no filter)
+        true
+    }
 
     /// Check if this filter either matches, or does not care about the kind.
     fn kind_match(&self, kind: u64) -> bool {
-        self.kind.map(|v| v == kind).unwrap_or(true)
+        self.kinds
+            .as_ref()
+            .map(|ks| ks.contains(&kind))
+            .unwrap_or(true)
    }
 
     /// Determine if all populated fields in this filter match the provided event.
     pub fn interested_in_event(&self, event: &Event) -> bool {
-        self.id.as_ref().map(|v| v == &event.id).unwrap_or(true)
+        // self.id.as_ref().map(|v| v == &event.id).unwrap_or(true)
+        self.ids_match(event)
             && self.since.map(|t| event.created_at > t).unwrap_or(true)
             && self.kind_match(event.kind)
             && self.authors_match(event)
-            && self.pubkey_match(event)
-            && self.event_match(event)
+            && self.tag_match(event)
     }
 }
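
prefix_match is what gives filters their prefix-query semantics: an entry in ids or authors matches any event whose full hex id or pubkey merely starts with it. An equivalent iterator form with quick checks (sketch):

fn prefix_match(prefixes: &[String], target: &str) -> bool {
    prefixes.iter().any(|p| target.starts_with(p))
}

fn main() {
    let filter_ids = vec!["abc".to_string()];
    assert!(prefix_match(&filter_ids, "abcdef")); // "abc" is a prefix of the event id
    assert!(!prefix_match(&filter_ids, "abd"));   // diverges at the third character
}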

@@ -173,27 +247,66 @@ mod tests {
     }
 
     #[test]
-    fn invalid_filter() {
-        // unrecognized field in filter
-        let raw_json = "[\"REQ\",\"some-id\",{\"foo\": 3}]";
-        assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
+    fn legacy_filter() {
+        // legacy field in filter
+        let raw_json = "[\"REQ\",\"some-id\",{\"kind\": 3}]";
+        assert!(serde_json::from_str::<Subscription>(raw_json).is_ok());
     }
 
     #[test]
     fn author_filter() -> Result<()> {
-        let raw_json = "[\"REQ\",\"some-id\",{\"author\": \"test-author-id\"}]";
+        let raw_json = r#"["REQ","some-id",{"authors": ["test-author-id"]}]"#;
         let s: Subscription = serde_json::from_str(raw_json)?;
         assert_eq!(s.id, "some-id");
         assert_eq!(s.filters.len(), 1);
         let first_filter = s.filters.get(0).unwrap();
-        assert_eq!(first_filter.author, Some("test-author-id".to_owned()));
+        assert_eq!(
+            first_filter.authors,
+            Some(vec!("test-author-id".to_owned()))
+        );
         Ok(())
     }
+
+    #[test]
+    fn interest_author_prefix_match() -> Result<()> {
+        // subscription with a filter for an author prefix
+        let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"authors": ["abc"]}]"#)?;
+        let e = Event {
+            id: "foo".to_owned(),
+            pubkey: "abcd".to_owned(),
+            created_at: 0,
+            kind: 0,
+            tags: Vec::new(),
+            content: "".to_owned(),
+            sig: "".to_owned(),
+            tagidx: None,
+        };
+        assert!(s.interested_in_event(&e));
+        Ok(())
+    }
+
+    #[test]
+    fn interest_id_prefix_match() -> Result<()> {
+        // subscription with a filter for an ID prefix
+        let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"]}]"#)?;
+        let e = Event {
+            id: "abcd".to_owned(),
+            pubkey: "".to_owned(),
+            created_at: 0,
+            kind: 0,
+            tags: Vec::new(),
+            content: "".to_owned(),
+            sig: "".to_owned(),
+            tagidx: None,
+        };
+        assert!(s.interested_in_event(&e));
+        Ok(())
+    }
 
     #[test]
     fn interest_id_nomatch() -> Result<()> {
         // subscription with a filter for ID
-        let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"id":"abc"}]"#)?;
+        let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"ids": ["xyz"]}]"#)?;
         let e = Event {
             id: "abcde".to_owned(),
             pubkey: "".to_owned(),
@@ -202,15 +315,17 @@ mod tests {
             tags: Vec::new(),
             content: "".to_owned(),
             sig: "".to_owned(),
+            tagidx: None,
         };
-        assert_eq!(s.interested_in_event(&e), false);
+        assert!(!s.interested_in_event(&e));
         Ok(())
     }
 
     #[test]
     fn interest_time_and_id() -> Result<()> {
         // subscription with a filter for ID and time
-        let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"id":"abc", "since": 1000}]"#)?;
+        let s: Subscription =
+            serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 1000}]"#)?;
         let e = Event {
             id: "abc".to_owned(),
             pubkey: "".to_owned(),
@@ -219,8 +334,9 @@ mod tests {
             tags: Vec::new(),
             content: "".to_owned(),
             sig: "".to_owned(),
+            tagidx: None,
         };
-        assert_eq!(s.interested_in_event(&e), false);
+        assert!(!s.interested_in_event(&e));
         Ok(())
     }
 
@@ -236,8 +352,9 @@ mod tests {
             tags: Vec::new(),
             content: "".to_owned(),
             sig: "".to_owned(),
+            tagidx: None,
         };
-        assert_eq!(s.interested_in_event(&e), true);
+        assert!(s.interested_in_event(&e));
         Ok(())
     }
 
@@ -253,8 +370,9 @@ mod tests {
             tags: Vec::new(),
             content: "".to_owned(),
             sig: "".to_owned(),
+            tagidx: None,
        };
-        assert_eq!(s.interested_in_event(&e), true);
+        assert!(s.interested_in_event(&e));
         Ok(())
     }
 
@@ -270,8 +388,9 @@ mod tests {
             tags: Vec::new(),
             content: "".to_owned(),
             sig: "".to_owned(),
+            tagidx: None,
         };
-        assert_eq!(s.interested_in_event(&e), true);
+        assert!(s.interested_in_event(&e));
         Ok(())
     }
     #[test]
@@ -287,8 +406,9 @@ mod tests {
             tags: Vec::new(),
             content: "".to_owned(),
             sig: "".to_owned(),
+            tagidx: None,
         };
-        assert_eq!(s.interested_in_event(&e), true);
+        assert!(s.interested_in_event(&e));
         Ok(())
     }
 
@@ -304,8 +424,9 @@ mod tests {
             tags: Vec::new(),
             content: "".to_owned(),
             sig: "".to_owned(),
+            tagidx: None,
         };
-        assert_eq!(s.interested_in_event(&e), false);
+        assert!(!s.interested_in_event(&e));
         Ok(())
     }
 }

src/utils.rs (new file, 15 lines)
@@ -0,0 +1,15 @@
//! Common utility functions
use std::time::SystemTime;

/// Seconds since 1970.
pub fn unix_time() -> u64 {
    SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
        .map(|x| x.as_secs())
        .unwrap_or(0)
}

/// Check if a string contains only hex characters.
pub fn is_hex(s: &str) -> bool {
    s.chars().all(|x| char::is_ascii_hexdigit(&x))
}