Mirror of https://github.com/scsibug/nostr-rs-relay.git, synced 2025-09-01 03:40:46 -04:00
Compare commits
119 Commits
26f296f76f
c3c9b5dcd2
da29bdd837
bacb85024c
7a77c459bb
34c8b04926
1032a51220
79abd981e1
b1957ab2b1
23aa6e7313
fb751ba252
7c5e851b82
f965c53434
74376d94e5
21d1bbcfe3
c3e13af9e3
05f70112e8
eab522dc39
edf7af1573
34f497a650
4adad4c3a9
70dfcb6a04
c50e10aa21
9e22776227
dad6911807
ddc58a2f1c
1131c1986e
06fcaad9a1
087b68128f
4647476622
7a72e588ea
9237eed735
f4beb884b3
73285683a3
2f10271903
a34516628b
eba7a32615
4d746fad85
0582a891cc
2bcddf8bbf
1595ec783d
a2d1d78e23
04db2203bb
1c1b1a1802
993fec4eed
beffeb4d86
5135f3b007
ba0b50bc9c
c65c64275e
80c459c36c
8e4e2d824b
c13961a5c4
05b08c7916
9a141dc950
8c9170d4e3
5508020777
43021910ea
c0158af18b
164603dedd
c1c25a22f5
6df92f9580
440217e1ee
96359aafab
5414629298
2be75e18fb
5f6ff4c2b7
df411c24fb
39f9984c4f
9d55731073
5638f70d66
98a08d054a
0ef7d618a8
bf06bea808
e5ca8c2a86
8ea63f0b27
3229e4192f
7fd9b55e70
5cecfba319
d0f57aea21
40abd6858e
136e41d234
35a1973a46
1daa25600d
692925942a
84afd4b64e
46160bb1f9
2fc9168a38
01d0d44868
93f6337fda
f3a42712a6
27361d064a
3bafb611e5
b960ab70de
15e2f097aa
185f9e7abb
f44dae6ac9
abc356c17d
81f8256c37
b3db2bd081
d31e974d56
36eaf9fea5
a16c4e698a
e63d179424
28b7b83a6e
2e42b1b86e
bd07a11f50
bc4b45d4b8
1ca5d652de
d7cceab8fc
2805a96e5b
ac14a0759f
cdd4e5949f
5999009779
e36c791c53
d95adbcb3d
509736c56d
8004ea9b44
866c239cc9
6012b57e95
@@ -7,6 +7,7 @@ environment:
 packages:
   - cargo
   - sqlite-devel
+  - protobuf-compiler
 sources:
   - https://git.sr.ht/~gheartsfield/nostr-rs-relay/
 shell: false
15 .github/workflows/ci.yml (vendored)

@@ -4,35 +4,36 @@ on:
   push:
     branches:
       - master
 
 jobs:
   test_nostr-rs-relay:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Update local toolchain
         run: |
+          sudo apt-get install -y protobuf-compiler
           rustup update
           rustup component add clippy
           rustup install nightly
 
       - name: Toolchain info
         run: |
           cargo --version --verbose
           rustc --version
           cargo clippy --version
 
 #      - name: Lint
 #        run: |
 #          cargo fmt -- --check
 #          cargo clippy -- -D warnings
 
       - name: Test
         run: |
           cargo check
           cargo test --all
 
       - name: Build
         run: |
-          cargo build --release
+          cargo build --release --locked
4 .gitignore (vendored)

@@ -1,2 +1,4 @@
-/target
+**/target/
 nostr.db
+nostr.db-*
+justfile
1944 Cargo.lock (generated)
File diff suppressed because it is too large
20 Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "nostr-rs-relay"
-version = "0.8.1"
+version = "0.8.13"
 edition = "2021"
 authors = ["Greg Heartsfield <scsibug@imap.cc>"]
 description = "A relay implementation for the Nostr protocol"
@@ -13,9 +13,12 @@ categories = ["network-programming", "web-programming"]
 
 [dependencies]
 clap = { version = "4.0.32", features = ["env", "default", "derive"]}
-tracing = "0.1.36"
-tracing-subscriber = "0.2.0"
+tracing = "0.1.37"
+tracing-appender = "0.2.2"
+tracing-subscriber = "0.3.16"
 tokio = { version = "1", features = ["full", "tracing", "signal"] }
+prost = "0.11"
+tonic = "0.8.3"
 console-subscriber = "0.1.8"
 futures = "0.3"
 futures-util = "0.3"
@@ -36,7 +39,7 @@ lazy_static = "1.4"
 governor = "0.4"
 nonzero_ext = "0.3"
 hyper = { version="0.14", features=["client", "server","http1","http2","tcp"] }
-hyper-tls = "0.5"
+hyper-rustls = { version = "0.24" }
 http = { version = "0.2" }
 parse_duration = "2"
 rand = "0.8"
@@ -49,6 +52,15 @@ chrono = "0.4.23"
 prometheus = "0.13.3"
 indicatif = "0.17.3"
+bech32 = "0.9.1"
+url = "2.3.1"
+qrcode = { version = "0.12.0", default-features = false, features = ["svg"] }
+nostr = { version = "0.18.0", default-features = false, features = ["base", "nip04", "nip19"] }
 
 [target.'cfg(not(target_env = "msvc"))'.dependencies]
 tikv-jemallocator = "0.5"
+log = "0.4"
 
 [dev-dependencies]
 anyhow = "1"
+
+[build-dependencies]
+tonic-build = { version="0.8.3", features = ["prost"] }
10 Dockerfile

@@ -1,5 +1,7 @@
-FROM docker.io/library/rust:1.67.0 as builder
-
+FROM docker.io/library/rust:1-bookworm as builder
+RUN apt-get update \
+    && apt-get install -y cmake protobuf-compiler \
+    && rm -rf /var/lib/apt/lists/*
 RUN USER=root cargo install cargo-auditable
 RUN USER=root cargo new --bin nostr-rs-relay
 WORKDIR ./nostr-rs-relay
@@ -12,12 +14,14 @@ RUN rm src/*.rs
 
 # copy project source code
 COPY ./src ./src
+COPY ./proto ./proto
+COPY ./build.rs ./build.rs
 
 # build auditable release using locked deps
 RUN rm ./target/release/deps/nostr*relay*
 RUN cargo auditable build --release --locked
 
-FROM docker.io/library/debian:bullseye-slim
+FROM docker.io/library/debian:bookworm-slim
 
 ARG APP=/usr/src/app
 ARG APP_DATA=/usr/src/app/db
@@ -35,6 +35,8 @@ mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay).
 - [ ] NIP-26: [Event Delegation](https://github.com/nostr-protocol/nips/blob/master/26.md) (_implemented, but currently disabled_)
 - [x] NIP-28: [Public Chat](https://github.com/nostr-protocol/nips/blob/master/28.md)
 - [x] NIP-33: [Parameterized Replaceable Events](https://github.com/nostr-protocol/nips/blob/master/33.md)
+- [x] NIP-40: [Expiration Timestamp](https://github.com/nostr-protocol/nips/blob/master/40.md)
+- [x] NIP-42: [Authentication of clients to relays](https://github.com/nostr-protocol/nips/blob/master/42.md)
 
 ## Quick Start
 
@@ -91,6 +93,11 @@ https://hub.docker.com/r/scsibug/nostr-rs-relay
 
 Building `nostr-rs-relay` requires an installation of Cargo & Rust: https://www.rust-lang.org/tools/install
 
+The following OS packages will be helpful; on Debian/Ubuntu:
+```console
+$ sudo apt-get install build-essential cmake protobuf-compiler pkg-config libssl-dev
+```
+
 Clone this repository, and then build a release version of the relay:
 
 ```console
@@ -139,7 +146,7 @@ settings.
 
 For examples of putting the relay behind a reverse proxy (for TLS
 termination, load balancing, and other features), see [Reverse
-Proxy](reverse-proxy.md).
+Proxy](docs/reverse-proxy.md).
 
 ## Dev Channel
 
7 build.rs (new file)

@@ -0,0 +1,7 @@
fn main() -> Result<(), Box<dyn std::error::Error>> {
    tonic_build::configure()
        .build_server(false)
        .protoc_arg("--experimental_allow_proto3_optional")
        .compile(&["proto/nauthz.proto"], &["proto"])?;
    Ok(())
}
90 config.toml

@@ -16,6 +16,13 @@ description = "A newly created nostr-rs-relay.\n\nCustomize this with your own i
 # Administrative contact URI
 #contact = "mailto:contact@example.com"
 
+# Favicon location. Relative to the current directory. Assumes an
+# ICO format.
+#favicon = "favicon.ico"
+
+# URL of Relay's icon.
+#relay_icon = "https://example.test/img.png"
+
 [diagnostics]
 # Enable tokio tracing (for use with tokio-console)
 #tracing = false
@@ -38,7 +45,7 @@ description = "A newly created nostr-rs-relay.\n\nCustomize this with your own i
 # Database connection pool settings for subscribers:
 
 # Minimum number of SQLite reader connections
-#min_conn = 4
+#min_conn = 0
 
 # Maximum number of SQLite reader connections. Recommend setting this
 # to approx the number of cores.
@@ -48,6 +55,26 @@ description = "A newly created nostr-rs-relay.\n\nCustomize this with your own i
 # sqlite.
 #connection = "postgresql://postgres:nostr@localhost:7500/nostr"
 
+# Optional database connection string for writing. Use this for
+# postgres clusters where you want to separate reads and writes to
+# different nodes. Ignore for single-database instances.
+#connection_write = "postgresql://postgres:nostr@localhost:7500/nostr"
+
+[logging]
+# Directory to store log files. Log files roll over daily.
+#folder_path = "./log"
+#file_prefix = "nostr-relay"
+
+[grpc]
+# gRPC interfaces for externalized decisions and other extensions to
+# functionality.
+#
+# Events can be authorized through an external service, by providing
+# the URL below. In the event the server is not accessible, events
+# will be permitted. The protobuf3 schema used is available in
+# `proto/nauthz.proto`.
+# event_admission_server = "http://[::1]:50051"
+
 [network]
 # Bind to this network address
 address = "0.0.0.0"
@@ -79,10 +106,10 @@ reject_future_seconds = 1800
 #
 #messages_per_sec = 5
 
-# Limit client subscriptions created per second, averaged over one
-# minute. Must be an integer. If not set (or set to 0), defaults to
-# unlimited. Strongly recommended to set this to a low value such as
-# 10 to ensure fair service.
+# Limit client subscriptions created, averaged over one minute. Must
+# be an integer. If not set (or set to 0), defaults to unlimited.
+# Strongly recommended to set this to a low value such as 10 to ensure
+# fair service.
 #subscriptions_per_min = 0
 
 # UNIMPLEMENTED...
@@ -118,6 +145,11 @@ reject_future_seconds = 1800
 #  70202,
 #]
 
+# Event kind allowlist. Events other than these kinds will be discarded.
+#event_kind_allowlist = [
+#    0, 1, 2, 3, 7, 40, 41, 42, 43, 44, 30023,
+#]
+
 [authorization]
 # Pubkey addresses in this array are whitelisted for event publishing.
 # Only valid events by these authors will be accepted, if the variable
@@ -126,6 +158,10 @@ reject_future_seconds = 1800
 #  "35d26e4690cbe1a898af61cc3515661eb5fa763b57bd0b42e45099c8b32fd50f",
 #  "887645fef0ce0c3c1218d2f5d8e6132a19304cdc57cd20281d082f38cfea0072",
 #]
+# Enable NIP-42 authentication
+#nip42_auth = false
+# Send DMs (kind 4 and 44) and gift wraps (kind 1059) only to their authenticated recipients
+#nip42_dms = false
 
 [verified_users]
 # NIP-05 verification of users. Can be "enabled" to require NIP-05
@@ -152,3 +188,47 @@ reject_future_seconds = 1800
 # How many consecutive failed checks before we give up on verifying
 # this author.
 #max_consecutive_failures = 20
+
+[pay_to_relay]
+# Enable pay to relay
+#enabled = false
+
+# The cost to be admitted to relay
+#admission_cost = 4200
+
+# The cost in sats per post
+#cost_per_event = 0
+
+# Url of lnbits api
+#node_url = "<node url>"
+
+# LNBits api secret
+#api_secret = "<ln bits api>"
+
+# Nostr direct message on signup
+#direct_message=true
+
+# Terms of service
+#terms_message = """
+#This service (and supporting services) are provided "as is", without warranty of any kind, express or implied.
+#
+#By using this service, you agree:
+#* Not to engage in spam or abuse the relay service
+#* Not to disseminate illegal content
+#* That requests to delete content cannot be guaranteed
+#* To use the service in compliance with all applicable laws
+#* To grant necessary rights to your content for unlimited time
+#* To be of legal age and have capacity to use this service
+#* That the service may be terminated at any time without notice
+#* That the content you publish may be removed at any time without notice
+#* To have your IP address collected to detect abuse or misuse
+#* To cooperate with the relay to combat abuse or misuse
+#* You may be exposed to content that you might find triggering or distasteful
+#* The relay operator is not liable for content produced by users of the relay
+#"""
+
+# Whether or not new sign ups should be allowed
+#sign_ups = false
+
+# optional if `direct_message=false`
+#secret_key = "<nostr nsec>"
14 contrib/nostr-rs-relay.service (new file)

@@ -0,0 +1,14 @@
[Unit]
Description=nostr-rs-relay

[Service]
User=REPLACE_WITH_YOUR_USERNAME
WorkingDirectory=/var/lib/nostr-rs-relay
Environment=RUST_LOG=warn,nostr_rs_relay=info
ExecStart=/usr/bin/nostr-rs-relay --config /etc/nostr-rs-relay/config.toml
TimeoutStopSec=10
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
@@ -7,7 +7,7 @@ intervention. For heavily trafficked relays, there are a number of
 steps that the operator may need to take to maintain performance and
 limit disk usage.
 
-This maintenance guide is current as of version `0.7.14`. Future
+This maintenance guide is current as of version `0.8.2`. Future
 versions may incorporate and automate some of these steps.
 
 ## Backing Up the Database
@@ -43,18 +43,15 @@ vacuum;
 
 ## Clearing Hidden Events
 
-When events are deleted, either through deletion events, metadata or
-follower updates, or a replaceable event kind, the event is not
-actually removed from the database. Instead, a flag `HIDDEN` is set
-to true for the event, which excludes it from search results. The
-original intent was to ensure that subsequent rebroadcasts of the
-event would be easily detected as having been deleted, and would not
-need to be stored again. In practice, this decision causes excessive
-growth of the `tags` table, since all the previous followers are
-retained for those `HIDDEN` events.
+When events are deleted, the event is not actually removed from the
+database. Instead, a flag `HIDDEN` is set to true for the event,
+which excludes it from search results. High volume replacements from
+profile or other replaceable events are deleted, not hidden, in the
+current version of the relay.
 
-The `event` and especially the `tag` table can be significantly
-reduced in size by running these commands:
+In the current version, removing hidden events should not result in
+significant space savings, but it can still be used if there is no
+desire to hold on to events that can never be re-broadcast.
 
 ```console
 PRAGMA foreign_keys = ON;
@@ -81,18 +78,24 @@ PRAGMA foreign_keys = ON;
 delete from event where event_hash=x'00000000000c1271675dc86e3e1dd1336827bccabb90dc4c9d3b4465efefe00e';
 ```
 
-### Deleting All Events for Pubkey
+### Querying and Deleting All Events for Pubkey
 
 ```console
 PRAGMA foreign_keys = ON;
 
+select lower(hex(author)) as author, count(*) as c from event group by author order by c asc;
+
 delete from event where author=x'000000000002c7831d9c5a99f183afc2813a6f69a16edda7f6fc0ed8110566e6';
 ```
 
-### Deleting All Events of a Kind
+### Querying and Deleting All Events of a Kind
 
 ```console
 PRAGMA foreign_keys = ON;
 
+select printf('%7d', kind), count(*) as c from event group by kind order by c;
+
 delete from event where kind=70202;
 ```
 
@@ -109,7 +112,8 @@ seen" policy.
 
 ```console
 PRAGMA foreign_keys = ON;
-TODO!
+
+DELETE FROM event WHERE first_seen < CAST(strftime('%s', date('now', '-30 day')) AS INT);
 ```
 
 ### Delete Profile Events with No Recent Events
79 docs/grpc-extensions.md (new file)

@@ -0,0 +1,79 @@
# gRPC Extensions Design Document

The relay will be extensible through gRPC endpoints, definable in the
main configuration file. These will allow external programs to host
logic for deciding things such as: should this event be persisted,
should this connection be allowed, and should this subscription
request be registered. The primary goal is to allow for
relay-operator-specific functionality that lets operators serve
smaller communities and reduce spam and abuse.

This will likely evolve substantially; the first goal is to get a
basic one-way service that lets an externalized program decide on
event persistence. This does not represent the final state of gRPC
extensibility in `nostr-rs-relay`.

## Considerations

Write event latency must not be significantly affected. However, the
primary reason we are implementing this is spam/abuse protection, so
we are willing to tolerate some increase in latency if that protects
us against outages!

The interface should provide enough information to make simple
decisions, without burdening the relay with extra queries. The
decision endpoint will be mostly responsible for maintaining state and
gathering additional details.

## Design Overview

A gRPC server may be defined in the `config.toml` file. If it exists,
the relay will attempt to connect to it and send a message for each
`EVENT` command submitted by clients. If a successful response is
returned indicating the event is permitted, the relay continues
processing the event as normal. All existing whitelist, blacklist,
and `NIP-05` validation checks are still performed and MAY still
result in the event being rejected. If a successful response is
returned indicating the decision is anything other than permit, then
the relay MUST reject the event, and return a command result to the
user (using `NIP-20`) indicating the event was blocked (optionally
providing a message).

In the event there is an error in the gRPC interface, event processing
proceeds as if gRPC was disabled (fail open). This allows gRPC
servers to be deployed with minimal chance of causing a full relay
outage.
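
For reference, this hook is enabled through the `[grpc]` table added to `config.toml` in this changeset; uncommented, it looks like:

```toml
[grpc]
# If this server is unreachable, events are still permitted (fail open).
event_admission_server = "http://[::1]:50051"
```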

## Design Details

Currently one procedure call is supported, `EventAdmit`, in the
`Authorization` service. It accepts the following data in order to
support authorization decisions:

- The event itself
- The client IP that submitted the event
- The client's HTTP origin header, if one exists
- The client's HTTP user agent header, if one exists
- The public key of the client, if `NIP-42` authentication was
  performed (not supported in the relay yet!)
- The `NIP-05` associated with the event's public key, if it is known
  to the relay

A server providing authorization decisions will return the following:

- A decision to permit or deny the event
- An optional message that explains why the event was denied, to be
  transmitted to the client
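
As a rough sketch of how these messages fit together on the relay side, a client call might look like the following. It assumes the `nauthz` module generated by `tonic-build` from `proto/nauthz.proto` (shown later in this changeset), and it is illustrative only, not the relay's actual code; errors simply fail open, as described above.

```rust
use nauthz_grpc::authorization_client::AuthorizationClient;
use nauthz_grpc::{Decision, Event, EventRequest};

// Generated from proto/nauthz.proto by tonic-build (see build.rs).
pub mod nauthz_grpc {
    tonic::include_proto!("nauthz");
}

// Sketch only: ask the external server whether to admit an event,
// treating any transport or RPC failure as "permit" (fail open).
async fn event_admitted(url: String, event: Event, ip_addr: String) -> bool {
    let mut client = match AuthorizationClient::connect(url).await {
        Ok(c) => c,
        Err(_) => return true, // server unreachable: fail open
    };
    let request = EventRequest {
        event: Some(event),
        ip_addr: Some(ip_addr),
        origin: None,
        user_agent: None,
        auth_pubkey: None,
        nip05: None,
    };
    match client.event_admit(request).await {
        Ok(reply) => reply.into_inner().decision == Decision::Permit as i32,
        Err(_) => true, // RPC error: fail open
    }
}
```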

## Security Issues

There is little attempt to secure this interface, since it is intended
for use by processes running on the same host. It is recommended to
ensure that the gRPC server providing the API is not exposed to the
public Internet. Authorization server implementations should have
their own security reviews performed.

A slow gRPC server could cause availability issues for event
processing, since this is performed on a single thread. Avoid any
expensive or long-running processes that could result from submitted
events, since any client can initiate a gRPC call to the service.
84 docs/pay-to-relay.md (new file)

@@ -0,0 +1,84 @@
# Pay to Relay Design Document

The relay will use payment as a form of spam prevention. In order to post to the relay a user must pay a set rate. There is also the option to require a payment for each note posted to the relay. There is no cost to read from the relay.

## Configuration

Currently, [LNBits](https://github.com/lnbits/lnbits) is implemented as the payment processor. LNBits exposes a simple API for creating invoices; to use this API, create a wallet, find "API info" on the right side, and add the invoice/read key to this relay's config file.

The below configuration will need to be added to config.toml:
```
[pay_to_relay]
# Enable pay to relay
enabled = true
# The cost to be admitted to relay
admission_cost = 1000
# The cost in sats per post
cost_per_event = 0
# Url of lnbits api
node_url = "https://<IP of node>:5001/api/v1/payments"
# LNBits api secret
api_secret = "<LNbits api key>"
# Terms of service
terms_message = """This service ....
"""
# Whether or not new sign ups should be allowed
sign_ups = true
secret_key = "<nostr secret key to send dms>"
```

The LNBits instance must have a signed HTTPS certificate; a self-signed certificate will not work.

## Design Overview

### Concepts

All authors are initially not admitted to write to the relay. There are two ways to gain write access to the relay. The first is by attempting to post to the relay: upon receiving an event from an author that is not admitted, the relay will send a direct message including the terms of service of the relay and a lightning invoice for the admission cost. Once this invoice is paid the author can write to the relay. For this method to work the author must be reading from the relay. An author can also pay and accept the terms of service via a webpage `https://<relay-url>/join`.

## Design Details

Authors are stored in a dedicated table. This tracks:

* `pubkey`
* `is_admitted` whether or not the admission invoice has been paid, accepting the terms of service
* `balance` the current balance in sats of the author, used if there is a cost per post
* `tos_accepted_at` the timestamp of when the author accepted the tos

Invoice information is stored in a dedicated table. This tracks:
* `payment_hash` the payment hash of the lightning invoice
* `pubkey` of the author the invoice is issued to
* `invoice` bolt11 invoice
* `amount` in sats
* `status` (Paid/Unpaid/Expired)
* `description`
* `created_at` timestamp of creation
* `confirmed_at` timestamp of payment
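
As a rough illustration only, the two tables described above could be expressed in SQLite as follows; the column names mirror the lists above, but the actual names, types, and migrations in `nostr-rs-relay` may differ:

```sql
-- Illustrative schema sketch, not the relay's actual migration.
CREATE TABLE account (
    pubkey TEXT PRIMARY KEY,
    is_admitted INTEGER NOT NULL DEFAULT 0, -- invoice paid + ToS accepted
    balance INTEGER NOT NULL DEFAULT 0,     -- sats, used for cost per post
    tos_accepted_at INTEGER                 -- unix timestamp
);

CREATE TABLE invoice (
    payment_hash TEXT PRIMARY KEY, -- payment hash of the lightning invoice
    pubkey TEXT NOT NULL REFERENCES account (pubkey),
    invoice TEXT NOT NULL,         -- bolt11
    amount INTEGER NOT NULL,       -- sats
    status TEXT NOT NULL,          -- Paid / Unpaid / Expired
    description TEXT,
    created_at INTEGER NOT NULL,   -- timestamp of creation
    confirmed_at INTEGER           -- timestamp of payment
);
```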

### Event Handling

If "pay to relay" is enabled, all incoming events are evaluated to determine whether the author is on the relay's whitelist or if they have paid the admission fee and accepted the terms. If "pay per note" is enabled, there is an additional check to ensure that the author has enough balance, which is then reduced by the cost per note. If the author is on the whitelist, this balance check is not necessary.

### Integration

We have an existing database writer thread, which receives events and
attempts to persist them to disk. Once validated and persisted, these
events are broadcast to all subscribers.

When "pay to relay" is enabled, the writer must check if the author is admitted to post. If the author is not admitted to post, the event is forwarded to the payment module, where an invoice is generated, persisted, and broadcast as a direct message to the author.

### Threat Scenarios

Some of these mitigations are fully implemented, others are documented
simply to demonstrate that a mitigation is possible.

### Sign up Spamming

*Threat*: An attacker generates a large number of new pubkeys publishing to the relay, causing a large number of new invoices to be created for each new pubkey.

*Mitigation*: Rate limit the number of new sign ups.

### Admitted Author Spamming

*Threat*: An attacker gains write access by paying the admission fee, and then floods the relay with a large number of spam events.

*Mitigation*: The attacker's admission can be revoked and their admission fee will not be refunded. Enabling "cost per event" and increasing the admission cost can also discourage this type of behavior.
199 docs/reverse-proxy.md (new file)

@@ -0,0 +1,199 @@
# Reverse Proxy Setup Guide

It is recommended to run `nostr-rs-relay` behind a reverse proxy such
as `haproxy`, `nginx` or `traefik` to provide TLS termination. Simple examples
for `haproxy`, `nginx` and `traefik` configurations are documented here.

## Minimal HAProxy Configuration

Assumptions:

* HAProxy version is `2.4.10` or greater (older versions not tested).
* Hostname for the relay is `relay.example.com`.
* Your relay should be available over wss://relay.example.com
* Your (NIP-11) relay info page should be available on https://relay.example.com
* SSL certificate is located in `/etc/certs/example.com.pem`.
* Relay is running on port 8080.
* Limit connections to 400 concurrent.
* HSTS (HTTP Strict Transport Security) is desired.
* Only TLS 1.2 or greater is allowed.

```
global
    ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
    ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets

frontend fe_prod
    mode http
    bind :443 ssl crt /etc/certs/example.com.pem alpn h2,http/1.1
    bind :80
    http-request set-header X-Forwarded-Proto https if { ssl_fc }
    redirect scheme https code 301 if !{ ssl_fc }
    acl host_relay hdr(host) -i -m beg relay.example.com
    use_backend relay if host_relay
    # HSTS (1 year)
    http-response set-header Strict-Transport-Security max-age=31536000

backend relay
    mode http
    timeout connect 5s
    timeout client 50s
    timeout server 50s
    timeout tunnel 1h
    timeout client-fin 30s
    option tcp-check
    default-server maxconn 400 check inter 20s fastinter 1s
    server relay 127.0.0.1:8080
```

### HAProxy Notes

You may experience WebSocket connection problems with Firefox if
HTTP/2 is enabled, for older versions of HAProxy (2.3.x). Either
disable HTTP/2 (`h2`), or upgrade HAProxy.

## Bare-bones Nginx Configuration

Assumptions:

* `Nginx` version is `1.18.0` (other versions not tested).
* Hostname for the relay is `relay.example.com`.
* SSL certificate and key are located at `/etc/letsencrypt/live/relay.example.com/`.
* Relay is running on port `8080`.

```
http {
    server {
        listen 443 ssl;
        server_name relay.example.com;
        ssl_certificate /etc/letsencrypt/live/relay.example.com/fullchain.pem;
        ssl_certificate_key /etc/letsencrypt/live/relay.example.com/privkey.pem;
        ssl_protocols TLSv1.3 TLSv1.2;
        ssl_prefer_server_ciphers on;
        ssl_ecdh_curve secp521r1:secp384r1;
        ssl_ciphers EECDH+AESGCM:EECDH+AES256;

        # Optional Diffie-Helmann parameters
        # Generate with openssl dhparam -out /etc/ssl/certs/dhparam.pem 4096
        #ssl_dhparam /etc/ssl/certs/dhparam.pem;

        ssl_session_cache shared:TLS:2m;
        ssl_buffer_size 4k;

        # OCSP stapling
        ssl_stapling on;
        ssl_stapling_verify on;
        resolver 1.1.1.1 1.0.0.1 [2606:4700:4700::1111] [2606:4700:4700::1001]; # Cloudflare

        # Set HSTS to 365 days
        add_header Strict-Transport-Security 'max-age=31536000; includeSubDomains; preload' always;
        keepalive_timeout 70;

        location / {
            proxy_pass http://localhost:8080;
            proxy_http_version 1.1;
            proxy_read_timeout 1d;
            proxy_send_timeout 1d;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "Upgrade";
            proxy_set_header Host $host;
        }
    }
}
```

### Nginx Notes

The above configuration was tested on `nginx` `1.18.0` on `Ubuntu` `20.04` and `22.04`.

For help installing `nginx` on `Ubuntu`, see [this guide](https://www.digitalocean.com/community/tutorials/how-to-install-nginx-on-ubuntu-20-04).

For guidance on using `letsencrypt` to obtain a cert on `Ubuntu`, including an `nginx` plugin, see [this post](https://www.digitalocean.com/community/tutorials/how-to-secure-nginx-with-let-s-encrypt-on-ubuntu-20-04).


## Example Traefik Configuration

Assumptions:

* `Traefik` version is `2.9` (other versions not tested).
* `Traefik` is used for provisioning of Let's Encrypt certificates.
* `Traefik` is running in `Docker`, using `docker compose` and labels for the static configuration. An equivalent setup using a Traefik config file is possible too (but not covered here).
* Strict Transport Security is enabled.
* Hostname for the relay is `relay.example.com`, email address for ACME certificates provider is `name@example.com`.
* ipv6 is enabled, a viable private ipv6 subnet is specified in the example below.
* Relay is running on port `8080`.

```
version: '3'

networks:
  nostr:
    enable_ipv6: true
    ipam:
      config:
        - subnet: fd00:db8:a::/64
          gateway: fd00:db8:a::1

services:
  traefik:
    image: traefik:v2.9
    networks:
      nostr:
    command:
      - "--log.level=ERROR"
      # letsencrypt configuration
      - "--certificatesResolvers.http.acme.email=name@example.com"
      - "--certificatesResolvers.http.acme.storage=/certs/acme.json"
      - "--certificatesResolvers.http.acme.httpChallenge.entryPoint=http"
      # define entrypoints
      - "--entryPoints.http.address=:80"
      - "--entryPoints.http.http.redirections.entryPoint.to=https"
      - "--entryPoints.http.http.redirections.entryPoint.scheme=https"
      - "--entryPoints.https.address=:443"
      - "--entryPoints.https.forwardedHeaders.insecure=true"
      - "--entryPoints.https.proxyProtocol.insecure=true"
      # docker provider (get configuration from container labels)
      - "--providers.docker.endpoint=unix:///var/run/docker.sock"
      - "--providers.docker.exposedByDefault=false"
      - "--providers.file.directory=/config"
      - "--providers.file.watch=true"
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock:ro"
      - "$(pwd)/traefik/certs:/certs"
      - "$(pwd)/traefik/config:/config"
    logging:
      driver: "local"
    restart: always

  # example nostr config. only labels: section is relevant for Traefik config
  nostr:
    image: nostr-rs-relay:latest
    container_name: nostr-relay
    networks:
      nostr:
    restart: always
    user: 100:100
    volumes:
      - '$(pwd)/nostr/data:/usr/src/app/db:Z'
      - '$(pwd)/nostr/config/config.toml:/usr/src/app/config.toml:ro,Z'
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.nostr.entrypoints=https"
      - "traefik.http.routers.nostr.rule=Host(`relay.example.com`)"
      - "traefik.http.routers.nostr.tls.certresolver=http"
      - "traefik.http.routers.nostr.service=nostr"
      - "traefik.http.services.nostr.loadbalancer.server.port=8080"
      - "traefik.http.services.nostr.loadbalancer.passHostHeader=true"
      - "traefik.http.middlewares.nostr.headers.sslredirect=true"
      - "traefik.http.middlewares.nostr.headers.stsincludesubdomains=true"
      - "traefik.http.middlewares.nostr.headers.stspreload=true"
      - "traefik.http.middlewares.nostr.headers.stsseconds=63072000"
      - "traefik.http.routers.nostr.middlewares=nostr"
```

### Traefik Notes

Traefik will take care of the provisioning and renewal of certificates. In case of an ipv4-only relay, simply delete the `enable_ipv6:` and `ipam:` entries in the `networks:` section of the docker-compose file.
39 docs/run-as-linux-system-process.md (new file)

@@ -0,0 +1,39 @@
# Run as a Linux system process

Docker makes it easy to spin up and down environments, but it's also possible to run `nostr-rs-relay` as a systemd Linux process.
This guide assumes you're on a Linux machine and that Rust is already installed.

## Instructions

### Build nostr-rs-relay from source
Start by building the application from source. Here is how to do that:
1. `git clone https://github.com/scsibug/nostr-rs-relay.git`
2. `cd nostr-rs-relay`
3. `cargo build --release`

### Place the files where they belong
We want to place the nostr-rs-relay binary and the config.toml file where they belong. While still at the root level of the nostr-rs-relay folder you cloned in the last step, run the following commands:
1. `sudo cp target/release/nostr-rs-relay /usr/local/bin/`
2. `sudo mkdir /etc/nostr-rs-relay`
3. `sudo cp config.toml /etc/nostr-rs-relay`

### Create the systemd service file
We need to create a new systemd service file. These files are placed in the `/etc/systemd/system/` folder, where you will find many other services running.

1. `sudo vim /etc/systemd/system/nostr-rs-relay.service`
2. Paste in the contents of [this service file](../contrib/nostr-rs-relay.service). Remember to replace the `User` value with your own username.
3. Save the file and exit your text editor.

### Run the service
To get the service running, we need to reload the systemd daemon and enable the service.

1. `sudo systemctl daemon-reload`
2. `sudo systemctl start nostr-rs-relay.service`
3. `sudo systemctl status nostr-rs-relay.service`

### Tips

#### Logs
The application will write logs to the journal. To read it, execute `sudo journalctl -f -u nostr-rs-relay`.
@@ -179,7 +179,7 @@ attempts to persist them to disk. Once validated and persisted, these
 events are broadcast to all subscribers.
 
 When verification is enabled, the writer must check to ensure a valid,
-unexpired verification record exists for the auther. All metadata
+unexpired verification record exists for the author. All metadata
 events (regardless of verification status) are forwarded to a verifier
 module. If the verifier determines a new verification record is
 needed, it is also responsible for persisting and broadcasting the
1010 examples/nauthz/Cargo.lock (generated, new file)
File diff suppressed because it is too large
13 examples/nauthz/Cargo.toml (new file)

@@ -0,0 +1,13 @@
[package]
name = "nauthz-server"
version = "0.1.0"
edition = "2021"

[dependencies]
# Common dependencies
tokio = { version = "1.0", features = ["rt-multi-thread", "macros"] }
prost = "0.11"
tonic = "0.8.3"

[build-dependencies]
tonic-build = { version="0.8.3", features = ["prost"] }
7 examples/nauthz/build.rs (new file)

@@ -0,0 +1,7 @@
fn main() -> Result<(), Box<dyn std::error::Error>> {
    tonic_build::configure()
        .build_server(true)
        .protoc_arg("--experimental_allow_proto3_optional")
        .compile(&["../../proto/nauthz.proto"], &["../../proto"])?;
    Ok(())
}
60 examples/nauthz/src/main.rs (new file)

@@ -0,0 +1,60 @@
use tonic::{transport::Server, Request, Response, Status};

use nauthz_grpc::authorization_server::{Authorization, AuthorizationServer};
use nauthz_grpc::{Decision, EventReply, EventRequest};

pub mod nauthz_grpc {
    tonic::include_proto!("nauthz");
}

#[derive(Default)]
pub struct EventAuthz {
    allowed_kinds: Vec<u64>,
}

#[tonic::async_trait]
impl Authorization for EventAuthz {
    async fn event_admit(
        &self,
        request: Request<EventRequest>,
    ) -> Result<Response<EventReply>, Status> {
        let reply;
        let req = request.into_inner();
        let event = req.event.unwrap();
        let content_prefix: String = event.content.chars().take(40).collect();
        println!("recvd event, [kind={}, origin={:?}, nip05_domain={:?}, tag_count={}, content_sample={:?}]",
                 event.kind, req.origin, req.nip05.map(|x| x.domain), event.tags.len(), content_prefix);
        // Permit any event with a whitelisted kind
        if self.allowed_kinds.contains(&event.kind) {
            println!("This looks fine! (kind={})", event.kind);
            reply = nauthz_grpc::EventReply {
                decision: Decision::Permit as i32,
                message: None,
            };
        } else {
            println!("Blocked! (kind={})", event.kind);
            reply = nauthz_grpc::EventReply {
                decision: Decision::Deny as i32,
                message: Some(format!("kind {} not permitted", event.kind)),
            };
        }
        Ok(Response::new(reply))
    }
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let addr = "[::1]:50051".parse().unwrap();

    // A simple authorization engine that allows kinds 0-3
    let checker = EventAuthz {
        allowed_kinds: vec![0, 1, 2, 3],
    };
    println!("EventAuthz Server listening on {}", addr);
    // Start serving
    Server::builder()
        .add_service(AuthorizationServer::new(checker))
        .serve(addr)
        .await?;
    Ok(())
}
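
A quick way to try this example server (assuming the relay's `config.toml` uncomments the `event_admission_server` URL shown earlier, which matches the `[::1]:50051` address above):

```console
$ cd examples/nauthz
$ cargo run
EventAuthz Server listening on [::1]:50051
```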
60 proto/nauthz.proto (new file)

@@ -0,0 +1,60 @@
syntax = "proto3";

// Nostr Authorization Services
package nauthz;

// Authorization for actions against a relay
service Authorization {
  // Determine if an event should be admitted to the relay
  rpc EventAdmit(EventRequest) returns (EventReply) {}
}

message Event {
  bytes id = 1;           // 32-byte SHA256 hash of serialized event
  bytes pubkey = 2;       // 32-byte public key of event creator
  fixed64 created_at = 3; // UNIX timestamp provided by event creator
  uint64 kind = 4;        // event kind
  string content = 5;     // arbitrary event contents
  repeated TagEntry tags = 6; // event tag array
  bytes sig = 7;          // 32-byte signature of the event id
  // Individual values for a single tag
  message TagEntry {
    repeated string values = 1;
  }
}

// Event data and metadata for authorization decisions
message EventRequest {
  Event event =
      1; // the event to be admitted for further relay processing
  optional string ip_addr =
      2; // IP address of the client that submitted the event
  optional string origin =
      3; // HTTP origin header from the client, if one exists
  optional string user_agent =
      4; // HTTP user-agent header from the client, if one exists
  optional bytes auth_pubkey =
      5; // the public key associated with a NIP-42 AUTH'd session, if
         // authentication occurred
  optional Nip05Name nip05 =
      6; // NIP-05 address associated with the event pubkey, if it is
         // known and has been validated by the relay
  // A NIP_05 verification record
  message Nip05Name {
    string local = 1;
    string domain = 2;
  }
}

// A permit or deny decision
enum Decision {
  DECISION_UNSPECIFIED = 0;
  DECISION_PERMIT = 1; // Admit this event for further processing
  DECISION_DENY = 2;   // Deny persisting or propagating this event
}

// Response to an event authorization request
message EventReply {
  Decision decision = 1;       // decision to enforce
  optional string message = 2; // informative message for the client
}
109 reverse-proxy.md (deleted)

@@ -1,109 +0,0 @@
# Reverse Proxy Setup Guide

It is recommended to run `nostr-rs-relay` behind a reverse proxy such
as `haproxy` or `nginx` to provide TLS termination. Simple examples
of `haproxy` and `nginx` configurations are documented here.

## Minimal HAProxy Configuration

Assumptions:

* HAProxy version is `2.4.10` or greater (older versions not tested).
* Hostname for the relay is `relay.example.com`.
* Your relay should be available over wss://relay.example.com
* Your (NIP-11) relay info page should be available on https://relay.example.com
* SSL certificate is located in `/etc/certs/example.com.pem`.
* Relay is running on port 8080.
* Limit connections to 400 concurrent.
* HSTS (HTTP Strict Transport Security) is desired.
* Only TLS 1.2 or greater is allowed.

```
global
    ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
    ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets

frontend fe_prod
    mode http
    bind :443 ssl crt /etc/certs/example.com.pem alpn h2,http/1.1
    bind :80
    http-request set-header X-Forwarded-Proto https if { ssl_fc }
    redirect scheme https code 301 if !{ ssl_fc }
    acl host_relay hdr(host) -i relay.example.com
    use_backend relay if host_relay
    # HSTS (1 year)
    http-response set-header Strict-Transport-Security max-age=31536000

backend relay
    mode http
    timeout connect 5s
    timeout client 50s
    timeout server 50s
    timeout tunnel 1h
    timeout client-fin 30s
    option tcp-check
    default-server maxconn 400 check inter 20s fastinter 1s
    server relay 127.0.0.1:8080
```

### HAProxy Notes

You may experience WebSocket connection problems with Firefox if
HTTP/2 is enabled, for older versions of HAProxy (2.3.x). Either
disable HTTP/2 (`h2`), or upgrade HAProxy.

## Bare-bones Nginx Configuration

Assumptions:

* `Nginx` version is `1.18.0` (other versions not tested).
* Hostname for the relay is `relay.example.com`.
* SSL certificate and key are located at `/etc/letsencrypt/live/relay.example.com/`.
* Relay is running on port `8080`.

```
http {
    server {
        listen 443 ssl;
        server_name relay.example.com;
        ssl_certificate /etc/letsencrypt/live/relay.example.com/fullchain.pem;
        ssl_certificate_key /etc/letsencrypt/live/relay.example.com/privkey.pem;
        ssl_protocols TLSv1.3 TLSv1.2;
        ssl_prefer_server_ciphers on;
        ssl_ecdh_curve secp521r1:secp384r1;
        ssl_ciphers EECDH+AESGCM:EECDH+AES256;

        # Optional Diffie-Helmann parameters
        # Generate with openssl dhparam -out /etc/ssl/certs/dhparam.pem 4096
        #ssl_dhparam /etc/ssl/certs/dhparam.pem;

        ssl_session_cache shared:TLS:2m;
        ssl_buffer_size 4k;

        # OCSP stapling
        ssl_stapling on;
        ssl_stapling_verify on;
        resolver 1.1.1.1 1.0.0.1 [2606:4700:4700::1111] [2606:4700:4700::1001]; # Cloudflare

        # Set HSTS to 365 days
        add_header Strict-Transport-Security 'max-age=31536000; includeSubDomains; preload' always;
        keepalive_timeout 70;

        location / {
            proxy_pass http://localhost:8080;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "Upgrade";
            proxy_set_header Host $host;
        }
    }
}
```

### Nginx Notes

The above configuration was tested on `nginx` `1.18.0` was tested on `Ubuntu 20.04`.

For help installing `nginx` on `Ubuntu`, see [this guide](https://www.digitalocean.com/community/tutorials/how-to-install-nginx-on-ubuntu-20-04).

For guidance on using `letsencrypt` to obtain a cert on `Ubuntu`, including an `nginx` plugin, see [this post](https://www.digitalocean.com/community/tutorials/how-to-secure-nginx-with-let-s-encrypt-on-ubuntu-20-04).
@@ -1,16 +1,16 @@
+use nostr_rs_relay::config;
+use nostr_rs_relay::error::{Error, Result};
+use nostr_rs_relay::event::{single_char_tagname, Event};
+use nostr_rs_relay::repo::sqlite::{build_pool, PooledConnection};
+use nostr_rs_relay::repo::sqlite_migration::{curr_db_version, DB_VERSION};
+use nostr_rs_relay::utils::is_lower_hex;
+use rusqlite::params;
+use rusqlite::{OpenFlags, Transaction};
 use std::io;
 use std::path::Path;
-use nostr_rs_relay::utils::is_lower_hex;
-use tracing::info;
-use nostr_rs_relay::config;
-use nostr_rs_relay::event::{Event,single_char_tagname};
-use nostr_rs_relay::error::{Error, Result};
-use nostr_rs_relay::repo::sqlite::{PooledConnection, build_pool};
-use nostr_rs_relay::repo::sqlite_migration::{curr_db_version, DB_VERSION};
-use rusqlite::{OpenFlags, Transaction};
 use std::sync::mpsc;
 use std::thread;
-use rusqlite::params;
+use tracing::info;
 
 /// Bulk load JSONL data from STDIN to the database specified in config.toml (or ./nostr.db as a default).
 /// The database must already exist, this will not create a new one.
@@ -20,95 +20,101 @@ pub fn main() -> Result<()> {
     let _trace_sub = tracing_subscriber::fmt::try_init();
     println!("Nostr-rs-relay Bulk Loader");
     // check for a database file, or create one.
-    let settings = config::Settings::new();
+    let settings = config::Settings::new(&None)?;
     if !Path::new(&settings.database.data_directory).is_dir() {
         info!("Database directory does not exist");
         return Err(Error::DatabaseDirError);
     }
     // Get a database pool
-    let pool = build_pool("bulk-loader", &settings, OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE, 1,4,false);
+    let pool = build_pool(
+        "bulk-loader",
+        &settings,
+        OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
+        1,
+        4,
+        false,
+    );
     {
         // check for database schema version
         let mut conn: PooledConnection = pool.get()?;
         let version = curr_db_version(&mut conn)?;
         info!("current version is: {:?}", version);
         // ensure the schema version is current.
         if version != DB_VERSION {
             info!("version is not current, exiting");
-            panic!("cannot write to schema other than v{}", DB_VERSION);
+            panic!("cannot write to schema other than v{DB_VERSION}");
         }
     }
     // this channel will contain parsed events ready to be inserted
     let (event_tx, event_rx) = mpsc::sync_channel(100_000);
     // Thread for reading events
     let _stdin_reader_handler = thread::spawn(move || {
         let stdin = io::stdin();
         for readline in stdin.lines() {
             if let Ok(line) = readline {
                 // try to parse a nostr event
                 let eres: Result<Event, serde_json::Error> = serde_json::from_str(&line);
                 if let Ok(mut e) = eres {
                     if let Ok(()) = e.validate() {
                         e.build_index();
                         //debug!("Event: {:?}", e);
                         event_tx.send(Some(e)).ok();
                     } else {
                         info!("could not validate event");
                     }
                 } else {
                     info!("error reading event: {:?}", eres);
                 }
             } else {
                 // error reading
                 info!("error reading: {:?}", readline);
             }
         }
         info!("finished parsing events");
         event_tx.send(None).ok();
         let ok: Result<()> = Ok(());
         ok
     });
     let mut conn: PooledConnection = pool.get()?;
     let mut events_read = 0;
-    let event_batch_size =50_000;
+    let event_batch_size = 50_000;
     let mut new_events = 0;
     let mut has_more_events = true;
     while has_more_events {
         // begin a transaction
         let tx = conn.transaction()?;
         // read in batch_size events and commit
         for _ in 0..event_batch_size {
             match event_rx.recv() {
                 Ok(Some(e)) => {
                     events_read += 1;
                     // ignore ephemeral events
                     if !(e.kind >= 20000 && e.kind < 30000) {
                         match write_event(&tx, e) {
                             Ok(c) => {
                                 new_events += c;
                             }
                             Err(e) => {
                                 info!("error inserting event: {:?}", e);
                             }
                         }
                     }
                 }
                 Ok(None) => {
                     // signal that the sender will never produce more
                     // events
                     has_more_events = false;
                     break;
                 }
                 Err(_) => {
                     info!("sender is closed");
                     // sender is done
                 }
             }
         }
         info!("committed {} events...", new_events);
         tx.commit()?;
         conn.execute_batch("pragma wal_checkpoint(truncate)")?;
     }
     info!("processed {} events", events_read);
     info!("stored {} new events", new_events);
@@ -131,7 +137,7 @@ fn write_event(tx: &Transaction, e: Event) -> Result<usize> {
         params![id_blob, e.created_at, e.kind, pubkey_blob, delegator_blob, event_str]
     )?;
     if ins_count == 0 {
         return Ok(0);
     }
     // we want to capture the event_id that had the tag, the tag name, and the tag hex value.
     let event_id = tx.last_insert_rowid();
@@ -140,30 +146,30 @@ fn write_event(tx: &Transaction, e: Event) -> Result<usize> {
         let tagname = t.get(0).unwrap();
         let tagnamechar_opt = single_char_tagname(tagname);
         if tagnamechar_opt.is_none() {
             continue;
         }
         // safe because len was > 1
         let tagval = t.get(1).unwrap();
         // insert as BLOB if we can restore it losslessly.
         // this means it needs to be even length and lowercase.
         if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
             tx.execute(
                 "INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
                 params![event_id, tagname, hex::decode(tagval).ok()],
             )?;
         } else {
             // otherwise, insert as text
             tx.execute(
                 "INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
                 params![event_id, tagname, &tagval],
             )?;
         }
     }
     if e.is_replaceable() {
         //let query = "SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1;";
         //let count: usize = tx.query_row(query, params![e.kind, pubkey_blob], |row| row.get(0))?;
         //info!("found {} rows that /would/ be preserved", count);
         match tx.execute(
             "DELETE FROM event WHERE kind=? and author=? and id NOT IN (SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1);",
             params![e.kind, pubkey_blob, e.kind, pubkey_blob],
         ) {
@@ -7,7 +7,14 @@ pub struct CLIArgs {
         short,
         long,
         help = "Use the <directory> as the location of the database",
-        required = false,
+        required = false
     )]
     pub db: Option<String>,
+    #[arg(
+        short,
+        long,
+        help = "Use the <file name> as the location of the config file",
+        required = false
+    )]
+    pub config: Option<String>,
 }
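
A hypothetical sketch of how the new flag might be wired into startup code: `CLIArgs::parse()` comes from clap's derive API, and `Settings::new` now accepts the optional config file name (see the `src/config.rs` changes below). The surrounding `main` here is illustrative, not the relay's actual code.

```rust
use clap::Parser;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Both --db and --config are optional flags.
    let args = CLIArgs::parse();
    // Pass the optional config file name through to settings loading.
    let _settings = config::Settings::new(&args.config)?;
    Ok(())
}
```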
119 src/config.rs
@@ -1,8 +1,8 @@
//! Configuration file and settings management
use crate::payment::Processor;
use config::{Config, ConfigError, File};
use serde::{Deserialize, Serialize};
use std::time::Duration;
use tracing::warn;

#[derive(Debug, Serialize, Deserialize, Clone)]
#[allow(unused)]
@@ -12,6 +12,8 @@ pub struct Info {
pub description: Option<String>,
pub pubkey: Option<String>,
pub contact: Option<String>,
pub favicon: Option<String>,
pub relay_icon: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -23,6 +25,13 @@ pub struct Database {
pub min_conn: u32,
pub max_conn: u32,
pub connection: String,
pub connection_write: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Grpc {
pub event_admission_server: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -62,13 +71,31 @@ pub struct Limits {
pub max_ws_frame_bytes: Option<usize>,
pub broadcast_buffer: usize, // events to buffer for subscribers (prevents slow readers from consuming memory)
pub event_persist_buffer: usize, // events to buffer for database commits (block senders if database writes are too slow)
pub event_kind_blacklist: Option<Vec<u64>>
pub event_kind_blacklist: Option<Vec<u64>>,
pub event_kind_allowlist: Option<Vec<u64>>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Authorization {
pub pubkey_whitelist: Option<Vec<String>>, // If present, only allow these pubkeys to publish events
pub nip42_auth: bool, // if true enables NIP-42 authentication
pub nip42_dms: bool, // if true send DMs only to their authenticated recipients
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct PayToRelay {
pub enabled: bool,
pub admission_cost: u64, // Cost to have pubkey whitelisted
pub cost_per_event: u64, // Cost author to pay per event
pub node_url: String,
pub api_secret: String,
pub terms_message: String,
pub sign_ups: bool, // allow new users to sign up to relay
pub direct_message: bool, // Send direct message to user with invoice and terms
pub secret_key: Option<String>,
pub processor: Processor,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -139,42 +166,66 @@ impl VerifiedUsers {
}
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Logging {
pub folder_path: Option<String>,
pub file_prefix: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Settings {
pub info: Info,
pub diagnostics: Diagnostics,
pub database: Database,
pub grpc: Grpc,
pub network: Network,
pub limits: Limits,
pub authorization: Authorization,
pub pay_to_relay: PayToRelay,
pub verified_users: VerifiedUsers,
pub retention: Retention,
pub options: Options,
pub logging: Logging,
}

impl Settings {
#[must_use]
pub fn new() -> Self {
pub fn new(config_file_name: &Option<String>) -> Result<Self, ConfigError> {
let default_settings = Self::default();
// attempt to construct settings with file
let from_file = Self::new_from_default(&default_settings);
let from_file = Self::new_from_default(&default_settings, config_file_name);
match from_file {
Ok(f) => f,
Err(e) => {
warn!("Error reading config file ({:?})", e);
default_settings
// pass up the parse error if the config file was specified,
// otherwise use the default config (with a warning).
if config_file_name.is_some() {
Err(e)
} else {
eprintln!("Error reading config file ({:?})", e);
eprintln!("WARNING: Default configuration settings will be used");
Ok(default_settings)
}
}
ok => ok,
}
}

fn new_from_default(default: &Settings) -> Result<Self, ConfigError> {
fn new_from_default(
default: &Settings,
config_file_name: &Option<String>,
) -> Result<Self, ConfigError> {
let default_config_file_name = "config.toml".to_string();
let config: &String = match config_file_name {
Some(value) => value,
None => &default_config_file_name,
};
let builder = Config::builder();
let config: Config = builder
// use defaults
.add_source(Config::try_from(default)?)
// override with file contents
.add_source(File::with_name("config.toml"))
.add_source(File::with_name(config))
.build()?;
let mut settings: Settings = config.try_deserialize()?;
// ensure connection pool size is logical
@@ -191,6 +242,23 @@ impl Settings {
);
// initialize durations for verified users
settings.verified_users.init();

// Validate pay to relay settings
if settings.pay_to_relay.enabled {
assert_ne!(settings.pay_to_relay.api_secret, "");
// Should check that url is valid
assert_ne!(settings.pay_to_relay.node_url, "");
assert_ne!(settings.pay_to_relay.terms_message, "");

if settings.pay_to_relay.direct_message {
assert_ne!(
settings.pay_to_relay.secret_key,
Some("<nostr nsec>".to_string())
);
assert!(settings.pay_to_relay.secret_key.is_some());
}
}

Ok(settings)
}
}
@@ -204,6 +272,8 @@ impl Default for Settings {
description: None,
pubkey: None,
contact: None,
favicon: None,
relay_icon: None,
},
diagnostics: Diagnostics { tracing: false },
database: Database {
@@ -212,7 +282,11 @@ impl Default for Settings {
in_memory: false,
min_conn: 4,
max_conn: 8,
connection: "".to_owned(),
connection_write: None,
},
grpc: Grpc {
event_admission_server: None,
},
network: Network {
port: 8080,
@@ -231,9 +305,24 @@ impl Default for Settings {
broadcast_buffer: 16384,
event_persist_buffer: 4096,
event_kind_blacklist: None,
event_kind_allowlist: None,
},
authorization: Authorization {
pubkey_whitelist: None, // Allow any address to publish
nip42_auth: false, // Disable NIP-42 authentication
nip42_dms: false, // Send DMs to everybody
},
pay_to_relay: PayToRelay {
enabled: false,
admission_cost: 4200,
cost_per_event: 0,
terms_message: "".to_string(),
node_url: "".to_string(),
api_secret: "".to_string(),
sign_ups: false,
direct_message: true,
secret_key: None,
processor: Processor::LNBits,
},
verified_users: VerifiedUsers {
mode: VerifiedUsersMode::Disabled,
@@ -254,6 +343,10 @@ impl Default for Settings {
options: Options {
reject_future_seconds: None, // Reject events in the future if defined
},
logging: Logging {
folder_path: None,
file_prefix: None,
},
}
}
}
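The net effect of the Settings::new rework: a config file passed explicitly that fails to parse is now a hard error, while the implicit default config.toml still falls back to built-in defaults with a warning. A hedged sketch of the calling pattern (mirroring main.rs later in this diff):

    // Sketch: consuming the now-fallible constructor.
    fn load_settings(config_path: Option<String>) -> Settings {
        match Settings::new(&config_path) {
            Ok(s) => s,
            Err(e) => {
                // Only reachable for an explicit --config path; the implicit
                // default already fell back to Settings::default() internally.
                eprintln!("Error reading config file ({:?})", e);
                std::process::exit(1);
            }
        }
    }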
124 src/conn.rs
@@ -1,16 +1,30 @@
//! Client connection state
use crate::close::Close;
use crate::error::Error;
use crate::error::Result;

use crate::subscription::Subscription;
use std::collections::HashMap;

use tracing::{debug, trace};
use uuid::Uuid;

use crate::close::Close;
use crate::conn::Nip42AuthState::{AuthPubkey, Challenge, NoAuth};
use crate::error::Error;
use crate::error::Result;
use crate::event::Event;
use crate::subscription::Subscription;
use crate::utils::{host_str, unix_time};

/// A subscription identifier has a maximum length
const MAX_SUBSCRIPTION_ID_LEN: usize = 256;

/// NIP-42 authentication state
pub enum Nip42AuthState {
/// The client is not authenticated yet
NoAuth,
/// The AUTH challenge sent
Challenge(String),
/// The client is authenticated
AuthPubkey(String),
}

/// State for a client connection
pub struct ClientConn {
/// Client IP (either from socket, or configured proxy header
@@ -21,6 +35,8 @@ pub struct ClientConn {
subscriptions: HashMap<String, Subscription>,
/// Per-connection maximum concurrent subscriptions
max_subs: usize,
/// NIP-42 AUTH
auth: Nip42AuthState,
}

impl Default for ClientConn {
@@ -39,15 +55,18 @@ impl ClientConn {
client_id,
subscriptions: HashMap::new(),
max_subs: 32,
auth: NoAuth,
}
}

#[must_use] pub fn subscriptions(&self) -> &HashMap<String, Subscription> {
#[must_use]
pub fn subscriptions(&self) -> &HashMap<String, Subscription> {
&self.subscriptions
}

/// Check if the given subscription already exists
#[must_use] pub fn has_subscription(&self, sub: &Subscription) -> bool {
#[must_use]
pub fn has_subscription(&self, sub: &Subscription) -> bool {
self.subscriptions.values().any(|x| x == sub)
}

@@ -63,6 +82,22 @@ impl ClientConn {
&self.client_ip_addr
}

#[must_use]
pub fn auth_pubkey(&self) -> Option<&String> {
match &self.auth {
AuthPubkey(pubkey) => Some(pubkey),
_ => None,
}
}

#[must_use]
pub fn auth_challenge(&self) -> Option<&String> {
match &self.auth {
Challenge(pubkey) => Some(pubkey),
_ => None,
}
}

/// Add a new subscription for this connection.
/// # Errors
///
@@ -116,4 +151,79 @@ impl ClientConn {
self.get_client_prefix(),
);
}

pub fn generate_auth_challenge(&mut self) {
self.auth = Challenge(Uuid::new_v4().to_string());
}

pub fn authenticate(&mut self, event: &Event, relay_url: &str) -> Result<()> {
match &self.auth {
Challenge(_) => (),
AuthPubkey(_) => {
// already authenticated
return Ok(());
}
NoAuth => {
// unexpected AUTH request
return Err(Error::AuthFailure);
}
}
match event.validate() {
Ok(_) => {
if event.kind != 22242 {
return Err(Error::AuthFailure);
}

let curr_time = unix_time();
let past_cutoff = curr_time - 600; // 10 minutes
let future_cutoff = curr_time + 600; // 10 minutes
if event.created_at < past_cutoff || event.created_at > future_cutoff {
return Err(Error::AuthFailure);
}

let mut challenge: Option<&str> = None;
let mut relay: Option<&str> = None;

for tag in &event.tags {
if tag.len() == 2 && tag.get(0) == Some(&"challenge".into()) {
challenge = tag.get(1).map(|x| x.as_str());
}
if tag.len() == 2 && tag.get(0) == Some(&"relay".into()) {
relay = tag.get(1).map(|x| x.as_str());
}
}

match (challenge, &self.auth) {
(Some(received_challenge), Challenge(sent_challenge)) => {
if received_challenge != sent_challenge {
return Err(Error::AuthFailure);
}
}
(_, _) => {
return Err(Error::AuthFailure);
}
}

match (relay.and_then(host_str), host_str(relay_url)) {
(Some(received_relay), Some(our_relay)) => {
if received_relay != our_relay {
return Err(Error::AuthFailure);
}
}
(_, _) => {
return Err(Error::AuthFailure);
}
}

self.auth = AuthPubkey(event.pubkey.clone());
trace!(
"authenticated pubkey {} (cid: {})",
event.pubkey.chars().take(8).collect::<String>(),
self.get_client_prefix()
);
Ok(())
}
Err(_) => Err(Error::AuthFailure),
}
}
}
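For reference, the AUTH event a client sends to satisfy authenticate() is an ordinary signed event of kind 22242 whose tags echo the relay URL and the challenge. A hypothetical example built with the Event struct from this diff (placeholder id/sig shown here would of course fail validate(); unix_time is the relay's helper from src/utils.rs):

    let auth_event = Event {
        id: "<sha256 of the canonical form>".to_owned(),
        pubkey: "<32-byte hex pubkey>".to_owned(),
        created_at: unix_time(), // must be within +/- 600s of the relay clock
        kind: 22242,
        tags: vec![
            vec!["relay".to_owned(), "wss://relay.example.com/".to_owned()],
            vec!["challenge".to_owned(), "<uuid sent by the relay>".to_owned()],
        ],
        content: "".to_owned(),
        sig: "<schnorr signature over the id>".to_owned(),
        delegated_by: None,
        tagidx: None,
    };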
268 src/db.rs
@@ -2,21 +2,25 @@
use crate::config::Settings;
use crate::error::{Error, Result};
use crate::event::Event;
use crate::nauthz;
use crate::notice::Notice;
use crate::payment::PaymentMessage;
use crate::repo::postgres::{PostgresPool, PostgresRepo};
use crate::repo::sqlite::SqliteRepo;
use crate::repo::NostrRepo;
use crate::server::NostrMetrics;
use governor::clock::Clock;
use governor::{Quota, RateLimiter};
use log::LevelFilter;
use nostr::key::FromPkStr;
use nostr::key::Keys;
use r2d2;
use std::sync::Arc;
use std::thread;
use sqlx::pool::PoolOptions;
use sqlx::postgres::PgConnectOptions;
use sqlx::ConnectOptions;
use crate::repo::sqlite::SqliteRepo;
use crate::repo::postgres::{PostgresRepo,PostgresPool};
use crate::repo::NostrRepo;
use std::time::{Instant, Duration};
use tracing::log::LevelFilter;
use std::sync::Arc;
use std::thread;
use std::time::{Duration, Instant};
use tracing::{debug, info, trace, warn};

pub type SqlitePool = r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>;
@@ -26,6 +30,10 @@ pub type PooledConnection = r2d2::PooledConnection<r2d2_sqlite::SqliteConnection
pub struct SubmittedEvent {
pub event: Event,
pub notice_tx: tokio::sync::mpsc::Sender<Notice>,
pub source_ip: String,
pub origin: Option<String>,
pub user_agent: Option<String>,
pub auth_pubkey: Option<Vec<u8>>,
}

/// Database file
@@ -37,8 +45,8 @@ pub const DB_FILE: &str = "nostr.db";
/// Will panic if the pool could not be created.
pub async fn build_repo(settings: &Settings, metrics: NostrMetrics) -> Arc<dyn NostrRepo> {
match settings.database.engine.as_str() {
"sqlite" => {Arc::new(build_sqlite_pool(settings, metrics).await)},
"postgres" => {Arc::new(build_postgres_pool(settings, metrics).await)},
"sqlite" => Arc::new(build_sqlite_pool(settings, metrics).await),
"postgres" => Arc::new(build_postgres_pool(settings, metrics).await),
_ => panic!("Unknown database engine"),
}
}
@@ -62,10 +70,31 @@ async fn build_postgres_pool(settings: &Settings, metrics: NostrMetrics) -> Post
.connect_with(options)
.await
.unwrap();
let repo = PostgresRepo::new(pool, metrics);

let write_pool: PostgresPool = match &settings.database.connection_write {
Some(cfg_write) => {
let mut options_write: PgConnectOptions = cfg_write.as_str().parse().unwrap();
options_write.log_statements(LevelFilter::Debug);
options_write.log_slow_statements(LevelFilter::Warn, Duration::from_secs(60));

PoolOptions::new()
.max_connections(settings.database.max_conn)
.min_connections(settings.database.min_conn)
.idle_timeout(Duration::from_secs(60))
.connect_with(options_write)
.await
.unwrap()
}
None => pool.clone(),
};

let repo = PostgresRepo::new(pool, write_pool, metrics);

// Panic on migration failure
let version = repo.migrate_up().await.unwrap();
info!("Postgres migration completed, at v{}", version);
// startup scheduled tasks
repo.start().await.ok();
repo
}

@@ -76,6 +105,7 @@ pub async fn db_writer(
mut event_rx: tokio::sync::mpsc::Receiver<SubmittedEvent>,
bcast_tx: tokio::sync::broadcast::Sender<Event>,
metadata_tx: tokio::sync::broadcast::Sender<Event>,
payment_tx: tokio::sync::broadcast::Sender<PaymentMessage>,
mut shutdown: tokio::sync::broadcast::Receiver<()>,
) -> Result<()> {
// are we performing NIP-05 checking?
@@ -83,6 +113,10 @@ pub async fn db_writer(
// are we requiring NIP-05 user verification?
let nip05_enabled = settings.verified_users.is_enabled();

let pay_to_relay_enabled = settings.pay_to_relay.enabled;
let cost_per_event = settings.pay_to_relay.cost_per_event;
debug!("Pay to relay: {}", pay_to_relay_enabled);

//upgrade_db(&mut pool.get()?)?;

// Make a copy of the whitelist
@@ -100,6 +134,18 @@ pub async fn db_writer(
lim_opt = Some(RateLimiter::direct(Quota::per_minute(quota)));
}
}
// create a client if GRPC is enabled.
// Check with externalized event admitter service, if one is defined.
let mut grpc_client = if let Some(svr) = settings.grpc.event_admission_server {
Some(nauthz::EventAuthzService::connect(&svr).await)
} else {
None
};

//let gprc_client = settings.grpc.event_admission_server.map(|s| {
// event_admitter_connect(&s);
// });

loop {
if shutdown.try_recv().is_ok() {
info!("shutting down database writer");
@@ -117,24 +163,6 @@ pub async fn db_writer(
let subm_event = next_event.unwrap();
let event = subm_event.event;
let notice_tx = subm_event.notice_tx;
// check if this event is authorized.
if let Some(allowed_addrs) = whitelist {
// TODO: incorporate delegated pubkeys
// if the event address is not in allowed_addrs.
if !allowed_addrs.contains(&event.pubkey) {
debug!(
"rejecting event: {}, unauthorized author",
event.get_event_id_prefix()
);
notice_tx
.try_send(Notice::blocked(
event.id,
"pubkey is not allowed to publish to this relay",
))
.ok();
continue;
}
}

// Check that event kind isn't blacklisted
let kinds_blacklist = &settings.limits.event_kind_blacklist.clone();
@@ -146,15 +174,113 @@ pub async fn db_writer(
&event.kind
);
notice_tx
.try_send(Notice::blocked(
event.id,
"event kind is blocked by relay"
))
.try_send(Notice::blocked(event.id, "event kind is blocked by relay"))
.ok();
continue;
}
}

// Check that the event kind is in the allowlist, if one is defined
let kinds_allowlist = &settings.limits.event_kind_allowlist.clone();
if let Some(event_kind_allowlist) = kinds_allowlist {
if !event_kind_allowlist.contains(&event.kind) {
debug!(
"rejecting event: {}, allowlist kind: {}",
&event.get_event_id_prefix(),
&event.kind
);
notice_tx
.try_send(Notice::blocked(event.id, "event kind is blocked by relay"))
.ok();
continue;
}
}

// Set to None until the balance is fetched from the db
// Will stay None if the user is whitelisted and does not have to pay to post
// When pay to relay is enabled the whitelist is not a list of who can post
// It is a list of who can post for free
let mut user_balance: Option<u64> = None;
if !pay_to_relay_enabled {
// check if this event is authorized.
if let Some(allowed_addrs) = whitelist {
// TODO: incorporate delegated pubkeys
// if the event address is not in allowed_addrs.
if !allowed_addrs.contains(&event.pubkey) {
debug!(
"rejecting event: {}, unauthorized author",
event.get_event_id_prefix()
);
notice_tx
.try_send(Notice::blocked(
event.id,
"pubkey is not allowed to publish to this relay",
))
.ok();
continue;
}
}
} else {
// If the user is on the whitelist there is no need to check if the user is admitted or has balance to post
if whitelist.is_none()
|| (whitelist.is_some() && !whitelist.as_ref().unwrap().contains(&event.pubkey))
{
let key = Keys::from_pk_str(&event.pubkey).unwrap();
match repo.get_account_balance(&key).await {
Ok((user_admitted, balance)) => {
// Checks to make sure user is admitted
if !user_admitted {
debug!("user: {}, is not admitted", &event.pubkey);

// If the user is in DB but not admitted
// Send message to payment thread to check if outstanding invoice has been paid
payment_tx
.send(PaymentMessage::CheckAccount(event.pubkey))
.ok();
notice_tx
.try_send(Notice::blocked(event.id, "User is not admitted"))
.ok();
continue;
}

// Checks that user has enough balance to post
// TODO: this should send an invoice to user to top up
if balance < cost_per_event {
debug!("user: {}, does not have a balance", &event.pubkey,);
notice_tx
.try_send(Notice::blocked(event.id, "Insufficient balance"))
.ok();
continue;
}
user_balance = Some(balance);
debug!("User balance: {:?}", user_balance);
}
Err(
Error::SqlError(rusqlite::Error::QueryReturnedNoRows)
| Error::SqlxError(sqlx::Error::RowNotFound),
) => {
// User does not exist
info!("Unregistered user");
if settings.pay_to_relay.sign_ups {
payment_tx
.send(PaymentMessage::NewAccount(event.pubkey))
.ok();
}
let msg = "Pubkey not registered";
notice_tx.try_send(Notice::error(event.id, msg)).ok();
continue;
}
Err(err) => {
warn!("Error checking admission status: {:?}", err);
let msg = "relay experienced an error checking your admission status";
notice_tx.try_send(Notice::error(event.id, msg)).ok();
// Other error
continue;
}
}
}
}

// send any metadata events to the NIP-05 verifier
if nip05_active && event.is_kind_metadata() {
// we are sending this prior to even deciding if we
@@ -164,9 +290,16 @@ pub async fn db_writer(
metadata_tx.send(event.clone()).ok();
}

// get a validation result for use in verification and GRPC
let validation = if nip05_active {
Some(repo.get_latest_user_verification(&event.pubkey).await)
} else {
None
};

// check for NIP-05 verification
if nip05_enabled {
match repo.get_latest_user_verification(&event.pubkey).await {
if nip05_enabled && validation.is_some() {
match validation.as_ref().unwrap() {
Ok(uv) => {
if uv.is_valid(&settings.verified_users) {
info!(
@@ -189,7 +322,10 @@ pub async fn db_writer(
continue;
}
}
Err(Error::SqlError(rusqlite::Error::QueryReturnedNoRows)) => {
Err(
Error::SqlError(rusqlite::Error::QueryReturnedNoRows)
| Error::SqlxError(sqlx::Error::RowNotFound),
) => {
debug!(
"no verification records found for pubkey: {:?}",
event.get_author_prefix()
@@ -208,6 +344,52 @@ pub async fn db_writer(
}
}
}

// nip05 address
let nip05_address: Option<crate::nip05::Nip05Name> =
validation.and_then(|x| x.ok().map(|y| y.name));

// GRPC check
if let Some(ref mut c) = grpc_client {
trace!("checking if grpc permits");
let grpc_start = Instant::now();
let decision_res = c
.admit_event(
&event,
&subm_event.source_ip,
subm_event.origin,
subm_event.user_agent,
nip05_address,
subm_event.auth_pubkey,
)
.await;
match decision_res {
Ok(decision) => {
if !decision.permitted() {
// GRPC returned a decision to reject this event
info!(
"GRPC rejected event: {:?} (kind: {}) from: {:?} in: {:?} (IP: {:?})",
event.get_event_id_prefix(),
event.kind,
event.get_author_prefix(),
grpc_start.elapsed(),
subm_event.source_ip
);
notice_tx
.try_send(Notice::blocked(
event.id,
&decision.message().unwrap_or_default(),
))
.ok();
continue;
}
}
Err(e) => {
warn!("GRPC server error: {:?}", e);
}
}
}

// TODO: cache recent list of authors to remove a DB call.
let start = Instant::now();
if event.is_ephemeral() {
@@ -227,11 +409,12 @@ pub async fn db_writer(
notice_tx.try_send(Notice::duplicate(event.id)).ok();
} else {
info!(
"persisted event: {:?} (kind: {}) from: {:?} in: {:?}",
"persisted event: {:?} (kind: {}) from: {:?} in: {:?} (IP: {:?})",
event.get_event_id_prefix(),
event.kind,
event.get_author_prefix(),
start.elapsed()
start.elapsed(),
subm_event.source_ip,
);
event_write = true;
// send this out to all clients
@@ -249,6 +432,17 @@ pub async fn db_writer(

// use rate limit, if defined, and if an event was actually written.
if event_write {
// If pay to relay is disabled or the cost per event is 0
// No need to update user balance
if pay_to_relay_enabled && cost_per_event > 0 {
// If the user balance is Some, the user was not on the whitelist
// Their balance should be reduced by the cost per event
if let Some(_balance) = user_balance {
let pubkey = Keys::from_pk_str(&event.pubkey)?;
repo.update_account_balance(&pubkey, false, cost_per_event)
.await?;
}
}
if let Some(ref lim) = lim_opt {
if let Err(n) = lim.check() {
let wait_for = n.wait_time_from(clock.now());
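The write-side rate limit above uses governor's direct limiter; a minimal self-contained sketch of the same pattern (the 60-per-minute quota is illustrative):

    use governor::clock::{Clock, DefaultClock};
    use governor::{Quota, RateLimiter};
    use std::num::NonZeroU32;

    fn main() {
        let clock = DefaultClock::default();
        let quota = NonZeroU32::new(60).unwrap(); // e.g. 60 writes per minute
        let lim = RateLimiter::direct(Quota::per_minute(quota));
        for _ in 0..100 {
            if let Err(not_until) = lim.check() {
                // how long the writer would need to pause before retrying
                let wait_for = not_until.wait_time_from(clock.now());
                println!("rate limited; wait {:?}", wait_for);
                break;
            }
        }
    }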
@@ -84,7 +84,8 @@ pub struct ConditionQuery {
}

impl ConditionQuery {
#[must_use] pub fn allows_event(&self, event: &Event) -> bool {
#[must_use]
pub fn allows_event(&self, event: &Event) -> bool {
// check each condition, to ensure that the event complies
// with the restriction.
for c in &self.conditions {
@@ -101,14 +102,15 @@ impl ConditionQuery {
}

// Verify that the delegator approved the delegation; return a ConditionQuery if so.
#[must_use] pub fn validate_delegation(
#[must_use]
pub fn validate_delegation(
delegator: &str,
delegatee: &str,
cond_query: &str,
sigstr: &str,
) -> Option<ConditionQuery> {
// form the token
let tok = format!("nostr:delegation:{}:{}", delegatee, cond_query);
let tok = format!("nostr:delegation:{delegatee}:{cond_query}");
// form SHA256 hash
let digest: sha256::Hash = sha256::Hash::hash(tok.as_bytes());
let sig = schnorr::Signature::from_str(sigstr).unwrap();
@@ -144,7 +146,8 @@ pub struct Condition {

impl Condition {
/// Check if this condition allows the given event to be delegated
#[must_use] pub fn allows_event(&self, event: &Event) -> bool {
#[must_use]
pub fn allows_event(&self, event: &Event) -> bool {
// determine what the right-hand side of the operator is
let resolved_field = match &self.field {
Field::Kind => event.kind,
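The token hashed by validate_delegation is the NIP-26 string nostr:delegation:<delegatee>:<conditions>; a small sketch of forming and hashing it (signature verification against the delegator's key is omitted, and the inputs are hypothetical):

    use bitcoin_hashes::{sha256, Hash};

    fn delegation_digest(delegatee: &str, cond_query: &str) -> sha256::Hash {
        // exact token format from NIP-26, as used above
        let tok = format!("nostr:delegation:{delegatee}:{cond_query}");
        sha256::Hash::hash(tok.as_bytes())
    }

    fn main() {
        let digest = delegation_digest(
            "<delegatee pubkey hex>",
            "kind=1&created_at<1686078180",
        );
        println!("token digest: {:x}", digest);
    }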
60 src/error.rs
@@ -62,6 +62,26 @@ pub enum Error {
HexError(hex::FromHexError),
#[error("Delegation parse error")]
DelegationParseError,
#[error("Channel closed error")]
ChannelClosed,
#[error("Authz error")]
AuthzError,
#[error("Tonic GRPC error")]
TonicError(tonic::Status),
#[error("Invalid AUTH message")]
AuthFailure,
#[error("I/O Error")]
IoError(std::io::Error),
#[error("Event builder error")]
EventError(nostr::event::builder::Error),
#[error("Nostr key error")]
NostrKeyError(nostr::key::Error),
#[error("Payment hash mismatch")]
PaymentHash,
#[error("Error parsing url")]
URLParseError(url::ParseError),
#[error("HTTP error")]
HTTPError(http::Error),
#[error("Unknown/Undocumented")]
UnknownError,
}
@@ -130,3 +150,43 @@ impl From<config::ConfigError> for Error {
Error::ConfigError(r)
}
}

impl From<tonic::Status> for Error {
/// Wrap Tonic GRPC status error
fn from(r: tonic::Status) -> Self {
Error::TonicError(r)
}
}

impl From<std::io::Error> for Error {
fn from(r: std::io::Error) -> Self {
Error::IoError(r)
}
}
impl From<nostr::event::builder::Error> for Error {
/// Wrap event builder error
fn from(r: nostr::event::builder::Error) -> Self {
Error::EventError(r)
}
}

impl From<nostr::key::Error> for Error {
/// Wrap nostr key error
fn from(r: nostr::key::Error) -> Self {
Error::NostrKeyError(r)
}
}

impl From<url::ParseError> for Error {
/// Wrap url parse error
fn from(r: url::ParseError) -> Self {
Error::URLParseError(r)
}
}

impl From<http::Error> for Error {
/// Wrap HTTP error
fn from(r: http::Error) -> Self {
Error::HTTPError(r)
}
}
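These From impls are what let the rest of the codebase use the ? operator on library errors without explicit conversions; a one-function illustration:

    // With `impl From<url::ParseError> for Error` in place, the `?` below
    // converts a parse failure into Error::URLParseError automatically.
    fn parse_relay_url(s: &str) -> Result<url::Url, Error> {
        Ok(url::Url::parse(s)?)
    }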
245 src/event.rs
@@ -1,7 +1,12 @@
//! Event parsing and validation
use crate::delegation::validate_delegation;
use crate::error::Error::{CommandUnknownError, EventCouldNotCanonicalize, EventInvalidId, EventInvalidSignature, EventMalformedPubkey};
use crate::error::Error::{
CommandUnknownError, EventCouldNotCanonicalize, EventInvalidId, EventInvalidSignature,
EventMalformedPubkey,
};
use crate::error::Result;
use crate::event::EventWrapper::WrappedAuth;
use crate::event::EventWrapper::WrappedEvent;
use crate::nip05;
use crate::utils::unix_time;
use bitcoin_hashes::{sha256, Hash};
@@ -28,7 +33,8 @@ pub struct EventCmd {
}

impl EventCmd {
#[must_use] pub fn event_id(&self) -> &str {
#[must_use]
pub fn event_id(&self) -> &str {
&self.event.id
}
}
@@ -65,7 +71,8 @@ where
}

/// Attempt to form a single-char tag name.
#[must_use] pub fn single_char_tagname(tagname: &str) -> Option<char> {
#[must_use]
pub fn single_char_tagname(tagname: &str) -> Option<char> {
// We return the tag character if and only if the tagname consists
// of a single char.
let mut tagnamechars = tagname.chars();
@@ -83,17 +90,26 @@ where
}
}

pub enum EventWrapper {
WrappedEvent(Event),
WrappedAuth(Event),
}

/// Convert network event to parsed/validated event.
impl From<EventCmd> for Result<Event> {
fn from(ec: EventCmd) -> Result<Event> {
impl From<EventCmd> for Result<EventWrapper> {
fn from(ec: EventCmd) -> Result<EventWrapper> {
// ensure command is correct
if ec.cmd == "EVENT" {
ec.event.validate().map(|_| {
let mut e = ec.event;
e.build_index();
e.update_delegation();
e
WrappedEvent(e)
})
} else if ec.cmd == "AUTH" {
// we don't want to validate the event here, because NIP-42 can be disabled
// it will be validated later during the authentication process
Ok(WrappedAuth(ec.event))
} else {
Err(CommandUnknownError)
}
@@ -102,7 +118,8 @@ impl From<EventCmd> for Result<Event> {

impl Event {
#[cfg(test)]
#[must_use] pub fn simple_event() -> Event {
#[must_use]
pub fn simple_event() -> Event {
Event {
id: "0".to_owned(),
pubkey: "0".to_owned(),
@@ -116,41 +133,73 @@ impl Event {
}
}

#[must_use] pub fn is_kind_metadata(&self) -> bool {
#[must_use]
pub fn is_kind_metadata(&self) -> bool {
self.kind == 0
}

/// Should this event be persisted?
#[must_use] pub fn is_ephemeral(&self) -> bool {
#[must_use]
pub fn is_ephemeral(&self) -> bool {
self.kind >= 20000 && self.kind < 30000
}

/// Is this event currently expired?
pub fn is_expired(&self) -> bool {
if let Some(exp) = self.expiration() {
exp <= unix_time()
} else {
false
}
}

/// Determine the time at which this event should expire
pub fn expiration(&self) -> Option<u64> {
let default = "".to_string();
let dvals: Vec<&String> = self
.tags
.iter()
.filter(|x| !x.is_empty())
.filter(|x| x.get(0).unwrap() == "expiration")
.map(|x| x.get(1).unwrap_or(&default))
.take(1)
.collect();
let val_first = dvals.get(0);
val_first.and_then(|t| t.parse::<u64>().ok())
}

/// Should this event be replaced with newer timestamps from same author?
#[must_use] pub fn is_replaceable(&self) -> bool {
self.kind == 0 || self.kind == 3 || self.kind == 41 || (self.kind >= 10000 && self.kind < 20000)
#[must_use]
pub fn is_replaceable(&self) -> bool {
self.kind == 0
|| self.kind == 3
|| self.kind == 41
|| (self.kind >= 10000 && self.kind < 20000)
}

/// Should this event be replaced with newer timestamps from same author, for distinct `d` tag values?
#[must_use] pub fn is_param_replaceable(&self) -> bool {
#[must_use]
pub fn is_param_replaceable(&self) -> bool {
self.kind >= 30000 && self.kind < 40000
}

/// What is the replaceable `d` tag value?

/// Should this event be replaced with newer timestamps from same author, for distinct `d` tag values?
#[must_use] pub fn distinct_param(&self) -> Option<String> {
#[must_use]
pub fn distinct_param(&self) -> Option<String> {
if self.is_param_replaceable() {
let default = "".to_string();
let dvals:Vec<&String> = self.tags
let dvals: Vec<&String> = self
.tags
.iter()
.filter(|x| x.len() >= 1)
.filter(|x| !x.is_empty())
.filter(|x| x.get(0).unwrap() == "d")
.map(|x| x.get(1).unwrap_or_else(|| &default)).take(1)
.map(|x| x.get(1).unwrap_or(&default))
.take(1)
.collect();
let dval_first = dvals.get(0);
match dval_first {
Some(_) => {dval_first.map(|x| x.to_string())},
None => Some(default)
Some(_) => dval_first.map(|x| x.to_string()),
None => Some(default),
}
} else {
None
@@ -158,7 +207,8 @@ impl Event {
}

/// Pull a NIP-05 Name out of the event, if one exists
#[must_use] pub fn get_nip05_addr(&self) -> Option<nip05::Nip05Name> {
#[must_use]
pub fn get_nip05_addr(&self) -> Option<nip05::Nip05Name> {
if self.is_kind_metadata() {
// very quick check if we should attempt to parse this json
if self.content.contains("\"nip05\"") {
@@ -175,7 +225,8 @@ impl Event {
// is this event delegated (properly)?
// does the signature match, and are conditions valid?
// if so, return an alternate author for the event
#[must_use] pub fn delegated_author(&self) -> Option<String> {
#[must_use]
pub fn delegated_author(&self) -> Option<String> {
// is there a delegation tag?
let delegation_tag: Vec<String> = self
.tags
@@ -183,7 +234,8 @@ impl Event {
.filter(|x| x.len() == 4)
.filter(|x| x.get(0).unwrap() == "delegation")
.take(1)
.next()?.clone(); // get first tag
.next()?
.clone(); // get first tag

//let delegation_tag = self.tag_values_by_name("delegation");
// delegation tags should have exactly 3 elements after the name (pubkey, condition, sig)
@@ -243,15 +295,18 @@ impl Event {
}

/// Create a short event identifier, suitable for logging.
#[must_use] pub fn get_event_id_prefix(&self) -> String {
#[must_use]
pub fn get_event_id_prefix(&self) -> String {
self.id.chars().take(8).collect()
}
#[must_use] pub fn get_author_prefix(&self) -> String {
#[must_use]
pub fn get_author_prefix(&self) -> String {
self.pubkey.chars().take(8).collect()
}

/// Retrieve tag initial values across all tags matching the name
#[must_use] pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
#[must_use]
pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
self.tags
.iter()
.filter(|x| x.len() > 1)
@@ -260,7 +315,8 @@ impl Event {
.collect()
}

#[must_use] pub fn is_valid_timestamp(&self, reject_future_seconds: Option<usize>) -> bool {
#[must_use]
pub fn is_valid_timestamp(&self, reject_future_seconds: Option<usize>) -> bool {
if let Some(allowable_future) = reject_future_seconds {
let curr_time = unix_time();
// calculate difference, plus how far future we allow
@@ -292,14 +348,14 @@ impl Event {
let c = c_opt.unwrap();
// * compute the sha256sum.
let digest: sha256::Hash = sha256::Hash::hash(c.as_bytes());
let hex_digest = format!("{:x}", digest);
let hex_digest = format!("{digest:x}");
// * ensure the id matches the computed sha256sum.
if self.id != hex_digest {
debug!("event id does not match digest");
return Err(EventInvalidId);
}
// * validate the message digest (sig) using the pubkey & computed sha256 message hash.
let sig = schnorr::Signature::from_str(&self.sig).unwrap();
let sig = schnorr::Signature::from_str(&self.sig).map_err(|_| EventInvalidSignature)?;
if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) {
if let Ok(pubkey) = XOnlyPublicKey::from_str(&self.pubkey) {
SECP.verify_schnorr(&sig, &msg, &pubkey)
@@ -315,7 +371,7 @@ impl Event {
}

/// Convert event to canonical representation for signing.
fn to_canonical(&self) -> Option<String> {
pub fn to_canonical(&self) -> Option<String> {
// create a JsonValue for each event element
let mut c: Vec<Value> = vec![];
// id must be set to 0
@@ -352,7 +408,8 @@ impl Event {
}

/// Determine if the given tag and value set intersect with tags in this event.
#[must_use] pub fn generic_tag_val_intersect(&self, tagname: char, check: &HashSet<String>) -> bool {
#[must_use]
pub fn generic_tag_val_intersect(&self, tagname: char, check: &HashSet<String>) -> bool {
match &self.tagidx {
// check if this is indexable tagname
Some(idx) => match idx.get(&tagname) {
@@ -367,6 +424,22 @@ impl Event {
}
}

impl From<nostr::Event> for Event {
fn from(nostr_event: nostr::Event) -> Self {
Event {
id: nostr_event.id.to_hex(),
pubkey: nostr_event.pubkey.to_string(),
created_at: nostr_event.created_at.as_u64(),
kind: nostr_event.kind.as_u64(),
tags: nostr_event.tags.iter().map(|x| x.as_vec()).collect(),
content: nostr_event.content,
sig: nostr_event.sig.to_string(),
delegated_by: None,
tagidx: None,
}
}
}

#[cfg(test)]
mod tests {
use super::*;
@@ -391,7 +464,7 @@ mod tests {
fn empty_event_tag_match() {
let event = Event::simple_event();
assert!(!event
.generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()])));
}

#[test]
@@ -399,12 +472,11 @@ mod tests {
let mut event = Event::simple_event();
event.tags = vec![vec!["e".to_owned(), "foo".to_owned()]];
event.build_index();
assert_eq!(
assert!(
event.generic_tag_val_intersect(
'e',
&HashSet::from(["foo".to_owned(), "bar".to_owned()])
),
true
)
);
}

@@ -539,28 +611,28 @@ mod tests {
#[test]
fn ephemeral_event() {
let mut event = Event::simple_event();
event.kind=20000;
event.kind = 20000;
assert!(event.is_ephemeral());
event.kind=29999;
event.kind = 29999;
assert!(event.is_ephemeral());
event.kind=30000;
event.kind = 30000;
assert!(!event.is_ephemeral());
event.kind=19999;
event.kind = 19999;
assert!(!event.is_ephemeral());
}

#[test]
fn replaceable_event() {
let mut event = Event::simple_event();
event.kind=0;
event.kind = 0;
assert!(event.is_replaceable());
event.kind=3;
event.kind = 3;
assert!(event.is_replaceable());
event.kind=10000;
event.kind = 10000;
assert!(event.is_replaceable());
event.kind=19999;
event.kind = 19999;
assert!(event.is_replaceable());
event.kind=20000;
event.kind = 20000;
assert!(!event.is_replaceable());
}

@@ -582,8 +654,7 @@ mod tests {
// NIP case #1: "tags":[["d",""]]
let mut event = Event::simple_event();
event.kind = 30000;
event.tags = vec![
vec!["d".to_owned(), "".to_owned()]];
event.tags = vec![vec!["d".to_owned(), "".to_owned()]];
assert_eq!(event.distinct_param(), Some("".to_string()));
}

@@ -600,8 +671,7 @@ mod tests {
// NIP case #3: "tags":[["d"]]: implicit empty value ""
let mut event = Event::simple_event();
event.kind = 30000;
event.tags = vec![
vec!["d".to_owned()]];
event.tags = vec![vec!["d".to_owned()]];
assert_eq!(event.distinct_param(), Some("".to_string()));
}

@@ -612,7 +682,7 @@ mod tests {
event.kind = 30000;
event.tags = vec![
vec!["d".to_owned(), "".to_string()],
vec!["d".to_owned(), "not empty".to_string()]
vec!["d".to_owned(), "not empty".to_string()],
];
assert_eq!(event.distinct_param(), Some("".to_string()));
}
@@ -625,7 +695,7 @@ mod tests {
event.kind = 30000;
event.tags = vec![
vec!["d".to_owned(), "not empty".to_string()],
vec!["d".to_owned(), "".to_string()]
vec!["d".to_owned(), "".to_string()],
];
assert_eq!(event.distinct_param(), Some("not empty".to_string()));
}
@@ -638,7 +708,7 @@ mod tests {
event.tags = vec![
vec!["d".to_owned()],
vec!["d".to_owned(), "second value".to_string()],
vec!["d".to_owned(), "third value".to_string()]
vec!["d".to_owned(), "third value".to_string()],
];
assert_eq!(event.distinct_param(), Some("".to_string()));
}
@@ -648,10 +718,77 @@ mod tests {
// NIP case #6: "tags":[["e"]]: same as no tags
let mut event = Event::simple_event();
event.kind = 30000;
event.tags = vec![
vec!["e".to_owned()],
];
event.tags = vec![vec!["e".to_owned()]];
assert_eq!(event.distinct_param(), Some("".to_string()));
}

#[test]
fn expiring_event_none() {
// regular events do not expire
let mut event = Event::simple_event();
event.kind = 7;
event.tags = vec![vec!["test".to_string(), "foo".to_string()]];
assert_eq!(event.expiration(), None);
}

#[test]
fn expiring_event_empty() {
// regular events do not expire
let mut event = Event::simple_event();
event.kind = 7;
event.tags = vec![vec!["expiration".to_string()]];
assert_eq!(event.expiration(), None);
}

#[test]
fn expiring_event_future() {
// a normal expiring event
let exp: u64 = 1676264138;
let mut event = Event::simple_event();
event.kind = 1;
event.tags = vec![vec!["expiration".to_string(), exp.to_string()]];
assert_eq!(event.expiration(), Some(exp));
}

#[test]
fn expiring_event_negative() {
// expiration set to a negative value (invalid)
let exp: i64 = -90;
let mut event = Event::simple_event();
event.kind = 1;
event.tags = vec![vec!["expiration".to_string(), exp.to_string()]];
assert_eq!(event.expiration(), None);
}

#[test]
fn expiring_event_zero() {
// a normal expiring event set to zero
let exp: i64 = 0;
let mut event = Event::simple_event();
event.kind = 1;
event.tags = vec![vec!["expiration".to_string(), exp.to_string()]];
assert_eq!(event.expiration(), Some(0));
}

#[test]
fn expiring_event_fraction() {
// expiration is fractional (invalid)
let exp: f64 = 23.334;
let mut event = Event::simple_event();
event.kind = 1;
event.tags = vec![vec!["expiration".to_string(), exp.to_string()]];
assert_eq!(event.expiration(), None);
}

#[test]
fn expiring_event_multiple() {
// multiple values, we just take the first
let mut event = Event::simple_event();
event.kind = 1;
event.tags = vec![
vec!["expiration".to_string(), (10).to_string()],
vec!["expiration".to_string(), (20).to_string()],
];
assert_eq!(event.expiration(), Some(10));
}
}
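A compact restatement of the NIP-40 expiration rule the tests above exercise: take the first expiration tag, parse its value as u64, and treat unparsable (negative, fractional, or missing) values as no expiration. A standalone sketch:

    fn expiration_of(tags: &[Vec<String>]) -> Option<u64> {
        tags.iter()
            .find(|t| t.first().map(String::as_str) == Some("expiration"))
            .and_then(|t| t.get(1))
            .and_then(|v| v.parse::<u64>().ok()) // "-90" and "23.334" parse to None
    }

    fn main() {
        let tags = vec![vec!["expiration".to_string(), "1676264138".to_string()]];
        assert_eq!(expiration_of(&tags), Some(1676264138));
        let bad = vec![vec!["expiration".to_string(), "23.334".to_string()]];
        assert_eq!(expiration_of(&bad), None);
    }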
@@ -1,5 +1,5 @@
//! Utilities for searching hexadecimal
use crate::utils::{is_hex};
use crate::utils::is_hex;
use hex;

/// Types of hexadecimal queries.
@@ -19,7 +19,8 @@ fn is_all_fs(s: &str) -> bool {
}

/// Find the next hex sequence greater than the argument.
#[must_use] pub fn hex_range(s: &str) -> Option<HexSearch> {
#[must_use]
pub fn hex_range(s: &str) -> Option<HexSearch> {
let mut hash_base = s.to_owned();
if !is_hex(&hash_base) || hash_base.len() > 64 {
return None;
@@ -56,9 +57,9 @@ fn is_all_fs(s: &str) -> bool {
} else if odd {
// check if first char in this byte is NOT 'f'
if b < 240 {
// bump up the first character in this byte
upper[byte_len] = b + 16;
// increment done, stop iterating through the vec
break;
}
// if it is 'f', reset the byte to 0 and do a carry
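hex_range turns a hex prefix into an exclusive upper bound for range scans over event ids; a sketch of the core arithmetic for the even-length case (the function above additionally handles odd-length prefixes by bumping a half-byte, as shown):

    // Next hex value after an even-length prefix: scan "7fa2" as [0x7fa2.., 0x7fa3..).
    fn upper_bound(prefix_hex: &str) -> Option<Vec<u8>> {
        let mut bytes = hex::decode(prefix_hex).ok()?;
        for b in bytes.iter_mut().rev() {
            if *b < 0xff {
                *b += 1;
                return Some(bytes);
            }
            *b = 0; // 0xff rolls over; carry into the next byte
        }
        None // all 0xff: no upper bound exists
    }

    fn main() {
        assert_eq!(upper_bound("7fa2"), Some(vec![0x7f, 0xa3]));
        assert_eq!(upper_bound("7fff"), Some(vec![0x80, 0x00]));
        assert_eq!(upper_bound("ffff"), None);
    }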
97 src/info.rs
@@ -1,9 +1,35 @@
//! Relay metadata using NIP-11
/// Relay Info
use crate::config;
use crate::config::Settings;
use serde::{Deserialize, Serialize};

pub const CARGO_PKG_VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
pub const UNIT: &str = "sats";

/// Limitations of the relay as specified in NIP-111
/// (This nip isn't finalized so may change)
#[derive(Debug, Serialize, Deserialize)]
#[allow(unused)]
pub struct Limitation {
#[serde(skip_serializing_if = "Option::is_none")]
payment_required: Option<bool>,
}

#[derive(Serialize, Deserialize, Debug)]
#[allow(unused)]
pub struct Fees {
#[serde(skip_serializing_if = "Option::is_none")]
admission: Option<Vec<Fee>>,
#[serde(skip_serializing_if = "Option::is_none")]
publication: Option<Vec<Fee>>,
}

#[derive(Serialize, Deserialize, Debug)]
#[allow(unused)]
pub struct Fee {
amount: u64,
unit: String,
}

#[derive(Debug, Serialize, Deserialize)]
#[allow(unused)]
@@ -19,25 +45,88 @@ pub struct RelayInfo {
#[serde(skip_serializing_if = "Option::is_none")]
pub contact: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub icon: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub supported_nips: Option<Vec<i64>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub software: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub limitation: Option<Limitation>,
#[serde(skip_serializing_if = "Option::is_none")]
pub payment_url: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub fees: Option<Fees>,
}

/// Convert an Info configuration into public Relay Info
impl From<config::Info> for RelayInfo {
fn from(i: config::Info) -> Self {
impl From<Settings> for RelayInfo {
fn from(c: Settings) -> Self {
let mut supported_nips = vec![1, 2, 9, 11, 12, 15, 16, 20, 22, 33, 40];

if c.authorization.nip42_auth {
supported_nips.push(42);
supported_nips.sort();
}

let i = c.info;
let p = c.pay_to_relay;

let limitations = Limitation {
payment_required: Some(p.enabled),
};

let (payment_url, fees) = if p.enabled {
let admission_fee = if p.admission_cost > 0 {
Some(vec![Fee {
amount: p.admission_cost,
unit: UNIT.to_string(),
}])
} else {
None
};

let post_fee = if p.cost_per_event > 0 {
Some(vec![Fee {
amount: p.cost_per_event,
unit: UNIT.to_string(),
}])
} else {
None
};

let fees = Fees {
admission: admission_fee,
publication: post_fee,
};

let payment_url = if p.enabled && i.relay_url.is_some() {
Some(format!(
"{}join",
i.relay_url.clone().unwrap().replace("ws", "http")
))
} else {
None
};
(payment_url, Some(fees))
} else {
(None, None)
};

RelayInfo {
id: i.relay_url,
name: i.name,
description: i.description,
pubkey: i.pubkey,
contact: i.contact,
supported_nips: Some(vec![1, 2, 9, 11, 12, 15, 16, 20, 22, 33]),
supported_nips: Some(supported_nips),
software: Some("https://git.sr.ht/~gheartsfield/nostr-rs-relay".to_owned()),
version: CARGO_PKG_VERSION.map(std::borrow::ToOwned::to_owned),
limitation: Some(limitations),
payment_url,
fees,
icon: i.relay_icon,
}
}
}
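For a paid relay, the struct above serializes (thanks to the skip_serializing_if attributes) into a NIP-11 document along these lines; all values are illustrative:

    {
      "name": "example relay",
      "description": "a paid nostr-rs-relay instance",
      "supported_nips": [1, 2, 9, 11, 12, 15, 16, 20, 22, 33, 40, 42],
      "software": "https://git.sr.ht/~gheartsfield/nostr-rs-relay",
      "version": "0.8.0",
      "limitation": { "payment_required": true },
      "payment_url": "https://relay.example.com/join",
      "fees": {
        "admission": [{ "amount": 4200, "unit": "sats" }],
        "publication": [{ "amount": 1, "unit": "sats" }]
      }
    }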
@@ -8,10 +8,12 @@ pub mod error;
pub mod event;
pub mod hexrange;
pub mod info;
pub mod nauthz;
pub mod nip05;
pub mod notice;
pub mod repo;
pub mod subscription;
pub mod utils;
// Public API for creating relays programatically
// Public API for creating relays programmatically
pub mod payment;
pub mod server;
74 src/main.rs
@@ -1,19 +1,61 @@
//! Server process
use clap::Parser;
use console_subscriber::ConsoleLayer;
use nostr_rs_relay::cli::CLIArgs;
use nostr_rs_relay::config;
use nostr_rs_relay::server::start_server;
use std::fs;
use std::path::Path;
use std::process;
use std::sync::mpsc as syncmpsc;
use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
use std::thread;
#[cfg(not(target_env = "msvc"))]
use tikv_jemallocator::Jemalloc;
use tracing::info;
use console_subscriber::ConsoleLayer;
use tracing_appender::non_blocking::WorkerGuard;
use tracing_subscriber::EnvFilter;

#[cfg(not(target_env = "msvc"))]
#[global_allocator]
static GLOBAL: Jemalloc = Jemalloc;

/// Start running a Nostr relay server.
fn main() {
// configure settings from config.toml
// replace default settings with those read from config.toml
let mut settings = config::Settings::new();
let args = CLIArgs::parse();

// get config file name from args
let config_file_arg = args.config;

// Ensure the config file is readable if it was explicitly set
if let Some(config_path) = config_file_arg.as_ref() {
let path = Path::new(&config_path);
if !path.exists() {
eprintln!("Config file not found: {}", &config_path);
process::exit(1);
}
if !path.is_file() {
eprintln!("Invalid config file path: {}", &config_path);
process::exit(1);
}
if let Err(err) = fs::metadata(path) {
eprintln!("Error while accessing file metadata: {}", err);
process::exit(1);
}
if let Err(err) = fs::File::open(path) {
eprintln!("Config file is not readable: {}", err);
process::exit(1);
}
}

let mut _log_guard: Option<WorkerGuard> = None;

// configure settings from the config file (defaults to config.toml)
// replace default settings with those read from the config file
let mut settings = config::Settings::new(&config_file_arg).unwrap_or_else(|e| {
eprintln!("Error reading config file ({:?})", e);
process::exit(1);
});

// setup tracing
if settings.diagnostics.tracing {
@@ -21,12 +63,30 @@ fn main() {
ConsoleLayer::builder().with_default_env().init();
} else {
// standard logging
tracing_subscriber::fmt::try_init().unwrap();
if let Some(path) = &settings.logging.folder_path {
// write logs to a folder
let prefix = match &settings.logging.file_prefix {
Some(p) => p.as_str(),
None => "relay",
};
let file_appender = tracing_appender::rolling::daily(path, prefix);
let (non_blocking, guard) = tracing_appender::non_blocking(file_appender);
let filter = EnvFilter::from_default_env();
// assign to a variable that is not dropped till the program ends
_log_guard = Some(guard);

tracing_subscriber::fmt()
.with_env_filter(filter)
.with_writer(non_blocking)
.try_init()
.unwrap();
} else {
// write to stdout
tracing_subscriber::fmt::try_init().unwrap();
}
}
info!("Starting up from main");

let args = CLIArgs::parse();

// get database directory from args
let db_dir_arg = args.db;
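The _log_guard assignment above matters because tracing_appender's non-blocking writer flushes from a background thread that shuts down when its WorkerGuard is dropped; a minimal sketch of the pattern (path and prefix are illustrative):

    use tracing_appender::{non_blocking, rolling};

    fn main() {
        let appender = rolling::daily("/tmp/logs", "relay");
        let (writer, guard) = non_blocking(appender);
        tracing_subscriber::fmt().with_writer(writer).init();
        // keep `guard` alive for the life of the program; dropping it early
        // stops the background writer and can lose buffered log lines.
        tracing::info!("relay starting");
        drop(guard); // only at shutdown, after the final log lines are queued
    }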
111
src/nauthz.rs
Normal file
111
src/nauthz.rs
Normal file
@@ -0,0 +1,111 @@
use crate::error::{Error, Result};
use crate::{event::Event, nip05::Nip05Name};
use nauthz_grpc::authorization_client::AuthorizationClient;
use nauthz_grpc::event::TagEntry;
use nauthz_grpc::{Decision, Event as GrpcEvent, EventReply, EventRequest};
use tracing::{info, warn};

pub mod nauthz_grpc {
    tonic::include_proto!("nauthz");
}

// A decision for the DB to act upon
pub trait AuthzDecision: Send + Sync {
    fn permitted(&self) -> bool;
    fn message(&self) -> Option<String>;
}

impl AuthzDecision for EventReply {
    fn permitted(&self) -> bool {
        self.decision == Decision::Permit as i32
    }
    fn message(&self) -> Option<String> {
        self.message.clone()
    }
}

// A connection to an event admission GRPC server
pub struct EventAuthzService {
    server_addr: String,
    conn: Option<AuthorizationClient<tonic::transport::Channel>>,
}

// conversion of Nip05Names into GRPC type
impl std::convert::From<Nip05Name> for nauthz_grpc::event_request::Nip05Name {
    fn from(value: Nip05Name) -> Self {
        nauthz_grpc::event_request::Nip05Name {
            local: value.local.clone(),
            domain: value.domain,
        }
    }
}

// conversion of event tags into a GRPC struct
fn tags_to_protobuf(tags: &[Vec<String>]) -> Vec<TagEntry> {
    tags.iter()
        .map(|x| TagEntry { values: x.clone() })
        .collect()
}

impl EventAuthzService {
    pub async fn connect(server_addr: &str) -> EventAuthzService {
        let mut eas = EventAuthzService {
            server_addr: server_addr.to_string(),
            conn: None,
        };
        eas.ready_connection().await;
        eas
    }

    pub async fn ready_connection(&mut self) {
        if self.conn.is_none() {
            let client = AuthorizationClient::connect(self.server_addr.to_string()).await;
            if let Err(ref msg) = client {
                warn!("could not connect to nostr authz GRPC server: {:?}", msg);
            } else {
                info!("connected to nostr authorization GRPC server");
            }
            self.conn = client.ok();
        }
    }

    pub async fn admit_event(
        &mut self,
        event: &Event,
        ip: &str,
        origin: Option<String>,
        user_agent: Option<String>,
        nip05: Option<Nip05Name>,
        auth_pubkey: Option<Vec<u8>>,
    ) -> Result<Box<dyn AuthzDecision>> {
        self.ready_connection().await;
        let id_blob = hex::decode(&event.id)?;
        let pubkey_blob = hex::decode(&event.pubkey)?;
        let sig_blob = hex::decode(&event.sig)?;
        if let Some(ref mut c) = self.conn {
            let gevent = GrpcEvent {
                id: id_blob,
                pubkey: pubkey_blob,
                sig: sig_blob,
                created_at: event.created_at,
                kind: event.kind,
                content: event.content.clone(),
                tags: tags_to_protobuf(&event.tags),
            };
            let svr_res = c
                .event_admit(EventRequest {
                    event: Some(gevent),
                    ip_addr: Some(ip.to_string()),
                    origin,
                    user_agent,
                    auth_pubkey,
                    nip05: nip05.map(nauthz_grpc::event_request::Nip05Name::from),
                })
                .await?;
            let reply = svr_res.into_inner();
            Ok(Box::new(reply))
        } else {
            Err(Error::AuthzError)
        }
    }
}
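A minimal call-site sketch for the service above (hedged: `gate_event`, and the way `authz`, `event`, and `ip` come into scope, are illustrative assumptions, not code from this diff):

    // Ask the gRPC authorization server about an incoming event, then act on
    // the boxed decision. A denial message, when present, can be relayed back
    // to the client as a notice.
    async fn gate_event(authz: &mut EventAuthzService, event: &Event, ip: &str) -> Result<bool> {
        let decision = authz.admit_event(event, ip, None, None, None, None).await?;
        if !decision.permitted() {
            info!("event denied: {:?}", decision.message());
        }
        Ok(decision.permitted())
    }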
100 src/nip05.rs
@@ -8,11 +8,11 @@ use crate::config::VerifiedUsers;
use crate::error::{Error, Result};
use crate::event::Event;
use crate::repo::NostrRepo;
use std::sync::Arc;
use hyper::body::HttpBody;
use hyper::client::connect::HttpConnector;
use hyper::Client;
use hyper_tls::HttpsConnector;
use hyper_rustls::HttpsConnector;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use std::time::SystemTime;
@@ -42,13 +42,14 @@ pub struct Verifier {
/// A NIP-05 identifier is a local part and domain.
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct Nip05Name {
    local: String,
    domain: String,
    pub local: String,
    pub domain: String,
}

impl Nip05Name {
    /// Does this name represent the entire domain?
    #[must_use] pub fn is_domain_only(&self) -> bool {
    #[must_use]
    pub fn is_domain_only(&self) -> bool {
        self.local == "_"
    }

@@ -58,8 +59,8 @@ impl Nip05Name {
            "https://{}/.well-known/nostr.json?name={}",
            self.domain, self.local
        )
            .parse::<http::Uri>()
            .ok()
        .parse::<http::Uri>()
        .ok()
    }
}

@@ -73,7 +74,10 @@ impl std::convert::TryFrom<&str> for Nip05Name {
        // check if local name is valid
        let local = components[0];
        let domain = components[1];
        if local.chars().all(|x| x.is_alphanumeric() || x == '_' || x == '-' || x == '.') {
        if local
            .chars()
            .all(|x| x.is_alphanumeric() || x == '_' || x == '-' || x == '.')
        {
            if domain
                .chars()
                .all(|x| x.is_alphanumeric() || x == '-' || x == '.')
@@ -107,7 +111,7 @@ impl std::fmt::Display for Nip05Name {
/// Check if the specified username and address are present and match in this response body
fn body_contains_user(username: &str, address: &str, bytes: &hyper::body::Bytes) -> Result<bool> {
    // convert the body into json
    let body: serde_json::Value = serde_json::from_slice(&bytes)?;
    let body: serde_json::Value = serde_json::from_slice(bytes)?;
    // ensure we have a names object.
    let names_map = body
        .as_object()
@@ -129,7 +133,12 @@ impl Verifier {
    ) -> Result<Self> {
        info!("creating NIP-05 verifier");
        // setup hyper client
        let https = HttpsConnector::new();
        let https = hyper_rustls::HttpsConnectorBuilder::new()
            .with_native_roots()
            .https_or_http()
            .enable_http1()
            .build();

        let client = Client::builder().build::<_, hyper::Body>(https);

        // After all accounts have been re-verified, don't check again
@@ -257,8 +266,15 @@ impl Verifier {
        // run a loop, restarting on failure
        loop {
            let res = self.run_internal().await;
            if let Err(e) = res {
                info!("error in verifier: {:?}", e);
            match res {
                Err(Error::ChannelClosed) => {
                    // channel was closed, we are shutting down
                    return;
                }
                Err(e) => {
                    info!("error in verifier: {:?}", e);
                }
                _ => {}
            }
        }
    }
@@ -305,6 +321,7 @@ impl Verifier {
                }
                Err(tokio::sync::broadcast::error::RecvError::Closed) => {
                    info!("metadata broadcast channel closed");
                    return Err(Error::ChannelClosed);
                }
            }
        },
@@ -341,42 +358,41 @@ impl Verifier {
                    UserWebVerificationStatus::Verified => {
                        // freshly verified account, update the
                        // timestamp.
                        self.repo.update_verification_timestamp(v.rowid)
                            .await?;
                        self.repo.update_verification_timestamp(v.rowid).await?;
                        info!("verification updated for {}", v.to_string());
                    }
                    UserWebVerificationStatus::DomainNotAllowed
                    | UserWebVerificationStatus::Unknown => {
                        // server may be offline, or temporarily
                        // blocked by the config file. Note the
                        // failure so we can process something
                        // else.
                    | UserWebVerificationStatus::Unknown => {
                        // server may be offline, or temporarily
                        // blocked by the config file. Note the
                        // failure so we can process something
                        // else.

                        // have we had enough failures to give up?
                        if v.failure_count >= max_failures as u64 {
                            info!(
                                "giving up on verifying {:?} after {} failures",
                                v.name, v.failure_count
                            );
                            self.repo.delete_verification(v.rowid)
                                .await?;
                        } else {
                            // record normal failure, incrementing failure count
                            info!("verification failed for {}", v.to_string());
                            self.repo.fail_verification(v.rowid).await?;
                        }
                    }
                        // have we had enough failures to give up?
                        if v.failure_count >= max_failures as u64 {
                            info!(
                                "giving up on verifying {:?} after {} failures",
                                v.name, v.failure_count
                            );
                            self.repo.delete_verification(v.rowid).await?;
                        } else {
                            // record normal failure, incrementing failure count
                            info!("verification failed for {}", v.to_string());
                            self.repo.fail_verification(v.rowid).await?;
                        }
                    }
                    UserWebVerificationStatus::Unverified => {
                        // domain has removed the verification, drop
                        // the record on our side.
                        info!("verification rescinded for {}", v.to_string());
                        self.repo.delete_verification(v.rowid)
                            .await?;
                        self.repo.delete_verification(v.rowid).await?;
                    }
                }
            }
            Err(Error::SqlError(rusqlite::Error::QueryReturnedNoRows)) => {
            Err(
                Error::SqlError(rusqlite::Error::QueryReturnedNoRows)
                | Error::SqlxError(sqlx::Error::RowNotFound),
            ) => {
                // No users need verification. Reset the interval to
                // the next verification attempt.
                let start = tokio::time::Instant::now() + self.wait_after_finish;
@@ -425,7 +441,9 @@ impl Verifier {
            }
        }
        // write the verification record
        self.repo.create_verification_record(&event.id, name).await?;
        self.repo
            .create_verification_record(&event.id, name)
            .await?;
        Ok(())
    }
}
@@ -455,7 +473,8 @@ pub struct VerificationRecord {

/// Check with settings to determine if a given domain is allowed to
/// publish.
#[must_use] pub fn is_domain_allowed(
#[must_use]
pub fn is_domain_allowed(
    domain: &str,
    whitelist: &Option<Vec<String>>,
    blacklist: &Option<Vec<String>>,
@@ -475,7 +494,8 @@ pub struct VerificationRecord {
impl VerificationRecord {
    /// Check if the record is recent enough to be considered valid,
    /// and the domain is allowed.
    #[must_use] pub fn is_valid(&self, verified_users_settings: &VerifiedUsers) -> bool {
    #[must_use]
    pub fn is_valid(&self, verified_users_settings: &VerifiedUsers) -> bool {
        //let settings = SETTINGS.read().unwrap();
        // how long a verification record is good for
        let nip05_expiration = &verified_users_settings.verify_expiration_duration;
src/notice.rs
@@ -5,6 +5,7 @@ pub enum EventResultStatus {
    Blocked,
    RateLimited,
    Error,
    Restricted,
}

pub struct EventResult {
@@ -16,17 +17,20 @@ pub struct EventResult {
pub enum Notice {
    Message(String),
    EventResult(EventResult),
    AuthChallenge(String),
}

impl EventResultStatus {
    #[must_use] pub fn to_bool(&self) -> bool {
    #[must_use]
    pub fn to_bool(&self) -> bool {
        match self {
            Self::Duplicate | Self::Saved => true,
            Self::Invalid |Self::Blocked | Self::RateLimited | Self::Error => false,
            Self::Invalid | Self::Blocked | Self::RateLimited | Self::Error | Self::Restricted => false,
        }
    }

    #[must_use] pub fn prefix(&self) -> &'static str {
    #[must_use]
    pub fn prefix(&self) -> &'static str {
        match self {
            Self::Saved => "saved",
            Self::Duplicate => "duplicate",
@@ -34,6 +38,7 @@ impl EventResultStatus {
            Self::Blocked => "blocked",
            Self::RateLimited => "rate-limited",
            Self::Error => "error",
            Self::Restricted => "restricted",
        }
    }
}
@@ -43,7 +48,8 @@ impl Notice {
    //     Notice::err_msg(format!("{}", err), id)
    //}

    #[must_use] pub fn message(msg: String) -> Notice {
    #[must_use]
    pub fn message(msg: String) -> Notice {
        Notice::Message(msg)
    }

@@ -52,27 +58,38 @@ impl Notice {
        Notice::EventResult(EventResult { id, msg, status })
    }

    #[must_use] pub fn invalid(id: String, msg: &str) -> Notice {
    #[must_use]
    pub fn invalid(id: String, msg: &str) -> Notice {
        Notice::prefixed(id, msg, EventResultStatus::Invalid)
    }

    #[must_use] pub fn blocked(id: String, msg: &str) -> Notice {
    #[must_use]
    pub fn blocked(id: String, msg: &str) -> Notice {
        Notice::prefixed(id, msg, EventResultStatus::Blocked)
    }

    #[must_use] pub fn rate_limited(id: String, msg: &str) -> Notice {
    #[must_use]
    pub fn rate_limited(id: String, msg: &str) -> Notice {
        Notice::prefixed(id, msg, EventResultStatus::RateLimited)
    }

    #[must_use] pub fn duplicate(id: String) -> Notice {
    #[must_use]
    pub fn duplicate(id: String) -> Notice {
        Notice::prefixed(id, "", EventResultStatus::Duplicate)
    }

    #[must_use] pub fn error(id: String, msg: &str) -> Notice {
    #[must_use]
    pub fn error(id: String, msg: &str) -> Notice {
        Notice::prefixed(id, msg, EventResultStatus::Error)
    }

    #[must_use] pub fn saved(id: String) -> Notice {
    #[must_use]
    pub fn restricted(id: String, msg: &str) -> Notice {
        Notice::prefixed(id, msg, EventResultStatus::Restricted)
    }
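A sketch of where the new `Restricted` status is intended to surface (hedged: the surrounding handler and the `event` binding are assumptions, not part of this diff): a pay-to-relay deployment can answer an EVENT from an unadmitted pubkey with a restricted notice instead of a generic error.

    // Illustrative only: reject a write from a pubkey that has not paid admission.
    let notice = Notice::restricted(
        event.id.clone(),
        "this relay requires payment for admission",
    );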
    #[must_use]
    pub fn saved(id: String) -> Notice {
        Notice::EventResult(EventResult {
            id,
            msg: "".into(),
176 src/payment/lnbits.rs Normal file
@@ -0,0 +1,176 @@
//! LNBits payment processor
use http::Uri;
use hyper::client::connect::HttpConnector;
use hyper::Client;
use hyper_rustls::HttpsConnector;
use nostr::Keys;
use serde::{Deserialize, Serialize};
use serde_json::Value;

use async_trait::async_trait;
use rand::Rng;

use std::str::FromStr;
use url::Url;

use crate::{config::Settings, error::Error};

use super::{InvoiceInfo, InvoiceStatus, PaymentProcessor};

const APIPATH: &str = "/api/v1/payments/";

/// Info LNBits expects in create invoice request
#[derive(Serialize, Deserialize, Debug)]
pub struct LNBitsCreateInvoice {
    out: bool,
    amount: u64,
    memo: String,
    webhook: String,
    unit: String,
    internal: bool,
    expiry: u64,
}

/// Invoice response for LN bits
#[derive(Debug, Serialize, Deserialize)]
pub struct LNBitsCreateInvoiceResponse {
    payment_hash: String,
    payment_request: String,
}

/// LNBits callback response.
/// Used when an invoice is paid;
/// LNBits posts the status change to the relay.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LNBitsCallback {
    pub checking_id: String,
    pub pending: bool,
    pub amount: u64,
    pub memo: String,
    pub time: u64,
    pub bolt11: String,
    pub preimage: String,
    pub payment_hash: String,
    pub wallet_id: String,
    pub webhook: String,
    pub webhook_status: Option<String>,
}

/// LNBits response for the check invoice endpoint
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LNBitsCheckInvoiceResponse {
    paid: bool,
}

#[derive(Clone)]
pub struct LNBitsPaymentProcessor {
    /// HTTP client
    client: hyper::Client<HttpsConnector<HttpConnector>, hyper::Body>,
    settings: Settings,
}

impl LNBitsPaymentProcessor {
    pub fn new(settings: &Settings) -> Self {
        // setup hyper client
        let https = hyper_rustls::HttpsConnectorBuilder::new()
            .with_native_roots()
            .https_only()
            .enable_http1()
            .build();
        let client = Client::builder().build::<_, hyper::Body>(https);

        Self {
            client,
            settings: settings.clone(),
        }
    }
}

#[async_trait]
impl PaymentProcessor for LNBitsPaymentProcessor {
    /// Calls LNBits API to get a new invoice
    async fn get_invoice(&self, key: &Keys, amount: u64) -> Result<InvoiceInfo, Error> {
        let random_number: u16 = rand::thread_rng().gen();
        let memo = format!("{}: {}", random_number, key.public_key());

        let callback_url = Url::parse(
            &self
                .settings
                .info
                .relay_url
                .clone()
                .unwrap()
                .replace("ws", "http"),
        )?
        .join("lnbits")?;

        let body = LNBitsCreateInvoice {
            out: false,
            amount,
            memo: memo.clone(),
            webhook: callback_url.to_string(),
            unit: "sat".to_string(),
            internal: false,
            expiry: 3600,
        };
        let url = Url::parse(&self.settings.pay_to_relay.node_url)?.join(APIPATH)?;
        let uri = Uri::from_str(url.as_str().strip_suffix('/').unwrap_or(url.as_str())).unwrap();

        let req = hyper::Request::builder()
            .method(hyper::Method::POST)
            .uri(uri)
            .header("X-Api-Key", &self.settings.pay_to_relay.api_secret)
            .body(hyper::Body::from(serde_json::to_string(&body)?))
            .expect("request builder");

        let res = self.client.request(req).await?;

        // Json to Struct of LNbits callback
        let body = hyper::body::to_bytes(res.into_body()).await?;
        let invoice_response: LNBitsCreateInvoiceResponse = serde_json::from_slice(&body)?;

        Ok(InvoiceInfo {
            pubkey: key.public_key().to_string(),
            payment_hash: invoice_response.payment_hash,
            bolt11: invoice_response.payment_request,
            amount,
            memo,
            status: InvoiceStatus::Unpaid,
            confirmed_at: None,
        })
    }

    /// Calls LNBits API to check the payment status of an invoice
    async fn check_invoice(&self, payment_hash: &str) -> Result<InvoiceStatus, Error> {
        let url = Url::parse(&self.settings.pay_to_relay.node_url)?
            .join(APIPATH)?
            .join(payment_hash)?;
        let uri = Uri::from_str(url.as_str()).unwrap();

        let req = hyper::Request::builder()
            .method(hyper::Method::GET)
            .uri(uri)
            .header("X-Api-Key", &self.settings.pay_to_relay.api_secret)
            .body(hyper::Body::empty())
            .expect("request builder");

        let res = self.client.request(req).await?;
        // Json to Struct of LNbits callback
        let body = hyper::body::to_bytes(res.into_body()).await?;
        let invoice_response: Value = serde_json::from_slice(&body)?;

        let status = if let Ok(invoice_response) =
            serde_json::from_value::<LNBitsCheckInvoiceResponse>(invoice_response)
        {
            if invoice_response.paid {
                InvoiceStatus::Paid
            } else {
                InvoiceStatus::Unpaid
            }
        } else {
            InvoiceStatus::Expired
        };

        Ok(status)
    }
}
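A sketch of the receiving side of the webhook registered above (hedged: the HTTP route, `body_bytes`, and `payment_tx` are assumptions; the actual handler lives in the server module, not in this file):

    // Illustrative only: decode LNBits' callback JSON and forward the payment
    // hash so the payment task can confirm the invoice and credit the account.
    let cb: LNBitsCallback = serde_json::from_slice(&body_bytes)?;
    payment_tx
        .send(PaymentMessage::InvoicePaid(cb.payment_hash))
        .ok();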
276 src/payment/mod.rs Normal file
@@ -0,0 +1,276 @@
use crate::error::{Error, Result};
use crate::event::Event;
use crate::payment::lnbits::LNBitsPaymentProcessor;
use crate::repo::NostrRepo;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tracing::{info, warn};

use async_trait::async_trait;
use nostr::key::{FromPkStr, FromSkStr};
use nostr::{key::Keys, Event as NostrEvent, EventBuilder};

pub mod lnbits;

/// Payment handler
pub struct Payment {
    /// Repository for saving/retrieving events and invoices
    repo: Arc<dyn NostrRepo>,
    /// Newly validated events get written and then broadcast on this channel to subscribers
    event_tx: tokio::sync::broadcast::Sender<Event>,
    /// Payment message sender
    payment_tx: tokio::sync::broadcast::Sender<PaymentMessage>,
    /// Payment message receiver
    payment_rx: tokio::sync::broadcast::Receiver<PaymentMessage>,
    /// Settings
    settings: crate::config::Settings,
    // Nostr Keys
    nostr_keys: Option<Keys>,
    /// Payment Processor
    processor: Arc<dyn PaymentProcessor>,
}

#[async_trait]
pub trait PaymentProcessor: Send + Sync {
    /// Get invoice from processor
    async fn get_invoice(&self, keys: &Keys, amount: u64) -> Result<InvoiceInfo, Error>;
    /// Check payment status of an invoice
    async fn check_invoice(&self, payment_hash: &str) -> Result<InvoiceStatus, Error>;
}

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub enum Processor {
    LNBits,
}

/// Possible states of an invoice
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, sqlx::Type)]
#[sqlx(type_name = "status")]
pub enum InvoiceStatus {
    Unpaid,
    Paid,
    Expired,
}

impl ToString for InvoiceStatus {
    fn to_string(&self) -> String {
        match self {
            InvoiceStatus::Paid => "Paid".to_string(),
            InvoiceStatus::Unpaid => "Unpaid".to_string(),
            InvoiceStatus::Expired => "Expired".to_string(),
        }
    }
}

/// Invoice information
#[derive(Debug, Clone)]
pub struct InvoiceInfo {
    pub pubkey: String,
    pub payment_hash: String,
    pub bolt11: String,
    pub amount: u64,
    pub status: InvoiceStatus,
    pub memo: String,
    pub confirmed_at: Option<u64>,
}

/// Message variants for the payment channel
#[derive(Debug, Clone)]
pub enum PaymentMessage {
    /// New account
    NewAccount(String),
    /// Check account
    CheckAccount(String),
    /// Account Admitted
    AccountAdmitted(String),
    /// Invoice generated
    Invoice(String, InvoiceInfo),
    /// Invoice callback;
    /// payment hash is passed
    // This may have to be changed to better support other processors
    InvoicePaid(String),
}

impl Payment {
    pub fn new(
        repo: Arc<dyn NostrRepo>,
        payment_tx: tokio::sync::broadcast::Sender<PaymentMessage>,
        payment_rx: tokio::sync::broadcast::Receiver<PaymentMessage>,
        event_tx: tokio::sync::broadcast::Sender<Event>,
        settings: crate::config::Settings,
    ) -> Result<Self> {
        info!("Create payment handler");

        // Create nostr key from sk string
        let nostr_keys = if let Some(secret_key) = &settings.pay_to_relay.secret_key {
            Some(Keys::from_sk_str(secret_key)?)
        } else {
            None
        };

        // Create processor kind defined in settings
        let processor = match &settings.pay_to_relay.processor {
            Processor::LNBits => Arc::new(LNBitsPaymentProcessor::new(&settings)),
        };

        Ok(Payment {
            repo,
            payment_tx,
            payment_rx,
            event_tx,
            settings,
            nostr_keys,
            processor,
        })
    }

    /// Perform Payment tasks
    pub async fn run(&mut self) {
        loop {
            let res = self.run_internal().await;
            if let Err(e) = res {
                info!("error in payment: {:?}", e);
            }
        }
    }

    /// Internal select loop for performing payment operations
    async fn run_internal(&mut self) -> Result<()> {
        tokio::select! {
            m = self.payment_rx.recv() => {
                match m {
                    Ok(PaymentMessage::NewAccount(pubkey)) => {
                        info!("payment event for {:?}", pubkey);
                        // REVIEW: This will need to change for cost per event
                        let amount = self.settings.pay_to_relay.admission_cost;
                        let invoice_info = self.get_invoice_info(&pubkey, amount).await?;
                        // TODO: should handle this error
                        self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok();
                    },
                    // Gets the most recent unpaid invoice from database
                    // Checks LNbits to verify if paid/unpaid
                    Ok(PaymentMessage::CheckAccount(pubkey)) => {
                        let keys = Keys::from_pk_str(&pubkey)?;

                        if let Ok(Some(invoice_info)) = self.repo.get_unpaid_invoice(&keys).await {
                            match self.check_invoice_status(&invoice_info.payment_hash).await? {
                                InvoiceStatus::Paid => {
                                    self.repo.admit_account(&keys, self.settings.pay_to_relay.admission_cost).await?;
                                    self.payment_tx.send(PaymentMessage::AccountAdmitted(pubkey)).ok();
                                }
                                _ => {
                                    self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok();
                                }
                            }
                        } else {
                            let amount = self.settings.pay_to_relay.admission_cost;
                            let invoice_info = self.get_invoice_info(&pubkey, amount).await?;
                            self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok();
                        }
                    }
                    Ok(PaymentMessage::InvoicePaid(payment_hash)) => {
                        if self.check_invoice_status(&payment_hash).await?.eq(&InvoiceStatus::Paid) {
                            let pubkey = self.repo
                                .update_invoice(&payment_hash, InvoiceStatus::Paid)
                                .await?;

                            let key = Keys::from_pk_str(&pubkey)?;
                            self.repo.admit_account(&key, self.settings.pay_to_relay.admission_cost).await?;
                        }
                    }
                    Ok(_) => {
                        // For this variant nothing needs to be done here;
                        // it is used by `server`
                    }
                    Err(err) => warn!("Payment RX: {err}")
                }
            }
        }

        Ok(())
    }

    /// Sends Nostr DM to pubkey that requested invoice.
    /// Two events: the terms, followed by the bolt11 invoice
    pub async fn send_admission_message(
        &self,
        pubkey: &str,
        invoice_info: &InvoiceInfo,
    ) -> Result<()> {
        let nostr_keys = match &self.nostr_keys {
            Some(key) => key,
            None => return Err(Error::CustomError("Nostr key not defined".to_string())),
        };

        // Create Nostr key from pk
        let key = Keys::from_pk_str(pubkey)?;

        let pubkey = key.public_key();

        // Event DM with terms of service
        let message_event: NostrEvent = EventBuilder::new_encrypted_direct_msg(
            nostr_keys,
            pubkey,
            &self.settings.pay_to_relay.terms_message,
        )?
        .to_event(nostr_keys)?;

        // Event DM with invoice
        let invoice_event: NostrEvent =
            EventBuilder::new_encrypted_direct_msg(nostr_keys, pubkey, &invoice_info.bolt11)?
                .to_event(nostr_keys)?;

        // Persist DM events to DB
        self.repo.write_event(&message_event.clone().into()).await?;
        self.repo.write_event(&invoice_event.clone().into()).await?;

        // Broadcast DM events
        self.event_tx.send(message_event.clone().into()).ok();
        self.event_tx.send(invoice_event.clone().into()).ok();

        Ok(())
    }

    /// Get Invoice Info.
    /// If the user has an active invoice, that is returned;
    /// otherwise a new invoice is generated by the payment processor
    pub async fn get_invoice_info(&self, pubkey: &str, amount: u64) -> Result<InvoiceInfo> {
        // If the user is already in the DB this will be false.
        // This avoids recreating admission invoices; DMs with the
        // same invoice will continue to be sent if the client keeps
        // trying to write to the relay.
        let key = Keys::from_pk_str(pubkey)?;
        if !self.repo.create_account(&key).await? {
            if let Ok(Some(invoice_info)) = self.repo.get_unpaid_invoice(&key).await {
                return Ok(invoice_info);
            }
        }

        let key = Keys::from_pk_str(pubkey)?;

        let invoice_info = self.processor.get_invoice(&key, amount).await?;

        // Persist invoice to DB
        self.repo
            .create_invoice_record(&key, invoice_info.clone())
            .await?;

        if self.settings.pay_to_relay.direct_message {
            // Admission event invoice and terms to pubkey that is joining
            self.send_admission_message(pubkey, &invoice_info).await?;
        }

        Ok(invoice_info)
    }

    /// Check paid status of invoice with LNbits
    pub async fn check_invoice_status(&self, payment_hash: &str) -> Result<InvoiceStatus, Error> {
        // Check the processor for current status; the invoice may have expired
        let status = self.processor.check_invoice(payment_hash).await?;
        self.repo
            .update_invoice(payment_hash, status.clone())
            .await?;

        Ok(status)
    }
}
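A wiring sketch for the handler above (hedged: the channel capacity and the `repo`, `event_tx`, and `settings` bindings are assumed to exist in the server's startup path; this mirrors how the relay spawns other background tasks, but is not code from the diff):

    // Illustrative only: create the payment channel, build the handler, and
    // drive it on its own tokio task.
    let (payment_tx, payment_rx) = tokio::sync::broadcast::channel::<PaymentMessage>(4096);
    let mut payment = Payment::new(
        repo.clone(),
        payment_tx.clone(),
        payment_rx,
        event_tx.clone(),
        settings.clone(),
    )?;
    tokio::task::spawn(async move { payment.run().await });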
src/repo/mod.rs
@@ -2,15 +2,17 @@ use crate::db::QueryResult;
use crate::error::Result;
use crate::event::Event;
use crate::nip05::VerificationRecord;
use crate::payment::{InvoiceInfo, InvoiceStatus};
use crate::subscription::Subscription;
use crate::utils::unix_time;
use async_trait::async_trait;
use nostr::Keys;
use rand::Rng;

pub mod sqlite;
pub mod sqlite_migration;
pub mod postgres;
pub mod postgres_migration;
pub mod sqlite;
pub mod sqlite_migration;

#[async_trait]
pub trait NostrRepo: Send + Sync {
@@ -57,6 +59,33 @@ pub trait NostrRepo: Send + Sync {

    /// Get oldest verification before timestamp
    async fn get_oldest_user_verification(&self, before: u64) -> Result<VerificationRecord>;

    /// Create a new account
    async fn create_account(&self, pubkey: &Keys) -> Result<bool>;

    /// Admit an account
    async fn admit_account(&self, pubkey: &Keys, admission_cost: u64) -> Result<()>;

    /// Gets user balance if they are an admitted pubkey
    async fn get_account_balance(&self, pubkey: &Keys) -> Result<(bool, u64)>;

    /// Update account balance
    async fn update_account_balance(
        &self,
        pub_key: &Keys,
        positive: bool,
        new_balance: u64,
    ) -> Result<()>;

    /// Create invoice record
    async fn create_invoice_record(&self, pubkey: &Keys, invoice_info: InvoiceInfo) -> Result<()>;

    /// Update Invoice for given payment hash
    async fn update_invoice(&self, payment_hash: &str, status: InvoiceStatus) -> Result<String>;

    /// Get the most recent invoice for a given pubkey;
    /// invoice must be unpaid and not expired
    async fn get_unpaid_invoice(&self, pubkey: &Keys) -> Result<Option<InvoiceInfo>>;
}

// Current time, with a slight forward jitter in seconds
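The diff truncates here, but the helper named by that comment is simple enough to sketch (hedged: the exact body is not shown in this hunk; this is a plausible reconstruction using the `rand::Rng` and `unix_time` imports above):

    // Illustrative only: add up to `sec` seconds of forward jitter so that
    // scheduled re-verifications don't all fire at the same instant.
    fn now_jitter(sec: u64) -> u64 {
        let jitter: u64 = rand::thread_rng().gen_range(0..sec);
        unix_time() + jitter
    }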
src/repo/postgres.rs
@@ -2,57 +2,97 @@ use crate::db::QueryResult;
use crate::error::Result;
use crate::event::{single_char_tagname, Event};
use crate::nip05::{Nip05Name, VerificationRecord};
use crate::payment::{InvoiceInfo, InvoiceStatus};
use crate::repo::{now_jitter, NostrRepo};
use crate::subscription::{ReqFilter, Subscription};
use async_std::stream::StreamExt;
use async_trait::async_trait;
use chrono::{DateTime, TimeZone, Utc};
use sqlx::postgres::PgRow;
use sqlx::Error::RowNotFound;
use sqlx::{Error, Execute, FromRow, Postgres, QueryBuilder, Row};
use std::time::{Duration, Instant};
use sqlx::Error::RowNotFound;

use crate::error;
use crate::hexrange::{hex_range, HexSearch};
use crate::repo::postgres_migration::run_migrations;
use crate::server::NostrMetrics;
use crate::utils::{is_hex, is_lower_hex};
use crate::utils::{self, is_hex, is_lower_hex};
use nostr::key::Keys;
use tokio::sync::mpsc::Sender;
use tokio::sync::oneshot::Receiver;
use tracing::log::trace;
use tracing::{debug, error, info};
use crate::error;
use tracing::{debug, error, info, trace, warn};

pub type PostgresPool = sqlx::pool::Pool<Postgres>;

pub struct PostgresRepo {
    conn: PostgresPool,
    conn_write: PostgresPool,
    metrics: NostrMetrics,
}

impl PostgresRepo {
    pub fn new(c: PostgresPool, m: NostrMetrics) -> PostgresRepo {
    pub fn new(c: PostgresPool, cw: PostgresPool, m: NostrMetrics) -> PostgresRepo {
        PostgresRepo {
            conn: c,
            conn_write: cw,
            metrics: m,
        }
    }
}

/// Cleanup expired events on a regular basis
async fn cleanup_expired(conn: PostgresPool, frequency: Duration) -> Result<()> {
    tokio::task::spawn(async move {
        loop {
            tokio::select! {
                _ = tokio::time::sleep(frequency) => {
                    let start = Instant::now();
                    let exp_res = delete_expired(conn.clone()).await;
                    match exp_res {
                        Ok(exp_count) => {
                            if exp_count > 0 {
                                info!("removed {} expired events in: {:?}", exp_count, start.elapsed());
                            }
                        },
                        Err(e) => {
                            warn!("could not remove expired events due to error: {:?}", e);
                        }
                    }
                }
            };
        }
    });
    Ok(())
}

/// One-time deletion of all expired events
async fn delete_expired(conn: PostgresPool) -> Result<u64> {
    let mut tx = conn.begin().await?;
    let update_count = sqlx::query("DELETE FROM \"event\" WHERE expires_at <= $1;")
        .bind(Utc.timestamp_opt(utils::unix_time() as i64, 0).unwrap())
        .execute(&mut tx)
        .await?
        .rows_affected();
    tx.commit().await?;
    Ok(update_count)
}

#[async_trait]
impl NostrRepo for PostgresRepo {

    async fn start(&self) -> Result<()> {
        info!("not implemented");
        Ok(())
        // begin a cleanup task for expired events.
        cleanup_expired(self.conn_write.clone(), Duration::from_secs(600)).await?;
        Ok(())
    }

    async fn migrate_up(&self) -> Result<usize> {
        Ok(run_migrations(&self.conn).await?)
        Ok(run_migrations(&self.conn_write).await?)
    }

    async fn write_event(&self, e: &Event) -> Result<u64> {
        // start transaction
        let mut tx = self.conn.begin().await?;
        let mut tx = self.conn_write.begin().await?;
        let start = Instant::now();

        // get relevant fields from event and convert to blobs.
@@ -66,7 +106,7 @@ impl NostrRepo for PostgresRepo {
        // replaceable event or parameterized replaceable event.
        if e.is_replaceable() {
            let repl_count = sqlx::query(
                "SELECT e.id FROM event e WHERE e.pub_key=? AND e.kind=? AND e.created_at >= ? LIMIT 1;")
                "SELECT e.id FROM event e WHERE e.pub_key=$1 AND e.kind=$2 AND e.created_at >= $3 LIMIT 1;")
                .bind(&pubkey_blob)
                .bind(e.kind as i64)
                .bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
@@ -77,49 +117,52 @@ impl NostrRepo for PostgresRepo {
            }
        }
        if let Some(d_tag) = e.distinct_param() {
            let repl_count:i64;
            if is_lower_hex(&d_tag) && (d_tag.len() % 2 == 0) {
                repl_count = sqlx::query_scalar(
            let repl_count: i64 = if is_lower_hex(&d_tag) && (d_tag.len() % 2 == 0) {
                sqlx::query_scalar(
                    "SELECT count(*) AS count FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.pub_key=$1 AND e.kind=$2 AND t.name='d' AND t.value_hex=$3 AND e.created_at >= $4 LIMIT 1;")
                    .bind(hex::decode(&e.pubkey).ok())
                    .bind(e.kind as i64)
                    .bind(hex::decode(d_tag).ok())
                    .bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
                    .fetch_one(&mut tx)
                    .await?;
                    .await?
            } else {
                repl_count = sqlx::query_scalar(
                sqlx::query_scalar(
                    "SELECT count(*) AS count FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.pub_key=$1 AND e.kind=$2 AND t.name='d' AND t.value=$3 AND e.created_at >= $4 LIMIT 1;")
                    .bind(hex::decode(&e.pubkey).ok())
                    .bind(e.kind as i64)
                    .bind(d_tag.as_bytes())
                    .bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
                    .fetch_one(&mut tx)
                    .await?;
            }
                    .await?
            };
            // if any rows were returned, then some newer event with
            // the same author/kind/tag value exist, and we can ignore
            // this event.
            if repl_count > 0 {
                return Ok(0)
                return Ok(0);
            }
        }
        // ignore if the event hash is a duplicate.
        let mut ins_count = sqlx::query(
            r#"INSERT INTO "event"
(id, pub_key, created_at, kind, "content", delegated_by)
VALUES($1, $2, $3, $4, $5, $6)
(id, pub_key, created_at, expires_at, kind, "content", delegated_by)
VALUES($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (id) DO NOTHING"#,
        )
            .bind(&id_blob)
            .bind(&pubkey_blob)
            .bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
            .bind(e.kind as i64)
            .bind(event_str.into_bytes())
            .bind(delegator_blob)
            .execute(&mut tx)
            .await?
            .rows_affected();
        .bind(&id_blob)
        .bind(&pubkey_blob)
        .bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap())
        .bind(
            e.expiration()
                .and_then(|x| Utc.timestamp_opt(x as i64, 0).latest()),
        )
        .bind(e.kind as i64)
        .bind(event_str.into_bytes())
        .bind(delegator_blob)
        .execute(&mut tx)
        .await?
        .rows_affected();

        if ins_count == 0 {
            // if the event was a duplicate, no need to insert event or
@@ -135,25 +178,27 @@ ON CONFLICT (id) DO NOTHING"#,
            let tag_val = &tag[1];
            // only single-char tags are searchable
            let tag_char_opt = single_char_tagname(tag_name);
            let query = "INSERT INTO tag (event_id, \"name\", value) VALUES($1, $2, $3) \
                ON CONFLICT (event_id, \"name\", value) DO NOTHING";
            match &tag_char_opt {
                Some(_) => {
                    // if tag value is lowercase hex;
                    if is_lower_hex(tag_val) && (tag_val.len() % 2 == 0) {
                        sqlx::query(query)
                        sqlx::query("INSERT INTO tag (event_id, \"name\", value, value_hex) VALUES($1, $2, NULL, $3) \
                            ON CONFLICT (event_id, \"name\", value, value_hex) DO NOTHING")
                            .bind(&id_blob)
                            .bind(tag_name)
                            .bind(hex::decode(tag_val).ok())
                            .execute(&mut tx)
                            .await?;
                            .await
                            .unwrap();
                    } else {
                        sqlx::query(query)
                        sqlx::query("INSERT INTO tag (event_id, \"name\", value, value_hex) VALUES($1, $2, $3, NULL) \
                            ON CONFLICT (event_id, \"name\", value, value_hex) DO NOTHING")
                            .bind(&id_blob)
                            .bind(tag_name)
                            .bind(tag_val.as_bytes())
                            .execute(&mut tx)
                            .await?;
                            .await
                            .unwrap();
                    }
                }
                None => {}
@@ -178,22 +223,21 @@ ON CONFLICT (id) DO NOTHING"#,
        // parameterized replaceable events
        // check for parameterized replaceable events that would be hidden; don't insert these either.
        if let Some(d_tag) = e.distinct_param() {
            let update_count;
            if is_lower_hex(&d_tag) && (d_tag.len() % 2 == 0) {
                update_count = sqlx::query("DELETE FROM event WHERE kind=$1 AND pub_key=$2 AND id NOT IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=$1 AND e.pub_key=$2 AND t.name='d' AND t.value_hex=$3 ORDER BY created_at DESC LIMIT 1);")
            let update_count = if is_lower_hex(&d_tag) && (d_tag.len() % 2 == 0) {
                sqlx::query("DELETE FROM event WHERE kind=$1 AND pub_key=$2 AND id IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=$1 AND e.pub_key=$2 AND t.name='d' AND t.value_hex=$3 ORDER BY created_at DESC OFFSET 1);")
                    .bind(e.kind as i64)
                    .bind(hex::decode(&e.pubkey).ok())
                    .bind(hex::decode(d_tag).ok())
                    .execute(&mut tx)
                    .await?.rows_affected();
                    .await?.rows_affected()
            } else {
                update_count = sqlx::query("DELETE FROM event WHERE kind=$1 AND pub_key=$2 AND id NOT IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=$1 AND e.pub_key=$2 AND t.name='d' AND t.value=$3 ORDER BY created_at DESC LIMIT 1);")
                sqlx::query("DELETE FROM event WHERE kind=$1 AND pub_key=$2 AND id IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=$1 AND e.pub_key=$2 AND t.name='d' AND t.value=$3 ORDER BY created_at DESC OFFSET 1);")
                    .bind(e.kind as i64)
                    .bind(hex::decode(&e.pubkey).ok())
                    .bind(d_tag.as_bytes())
                    .execute(&mut tx)
                    .await?.rows_affected();
            }
                    .await?.rows_affected()
            };
            if update_count > 0 {
                info!(
                    "removed {} older parameterized replaceable kind {} events for author: {:?}",
@@ -238,10 +282,10 @@ ON CONFLICT (id) DO NOTHING"#,
             LEFT JOIN tag t ON e.id = t.event_id \
             WHERE e.pub_key = $1 AND t.\"name\" = 'e' AND e.kind = 5 AND t.value = $2 LIMIT 1",
        )
            .bind(&pubkey_blob)
            .bind(&id_blob)
            .fetch_optional(&mut tx)
            .await?;
        .bind(&pubkey_blob)
        .bind(&id_blob)
        .fetch_optional(&mut tx)
        .await?;

        // check if the query returned a result, meaning we should
        // hide the current event
@@ -342,7 +386,10 @@ ON CONFLICT (id) DO NOTHING"#,

            // check if this is still active; every 100 rows
            if row_count % 100 == 0 && abandon_query_rx.try_recv().is_ok() {
                debug!("query cancelled by client (cid: {}, sub: {:?})", client_id, sub.id);
                debug!(
                    "query cancelled by client (cid: {}, sub: {:?})",
                    client_id, sub.id
                );
                return Ok(());
            }

@@ -358,7 +405,10 @@ ON CONFLICT (id) DO NOTHING"#,
                if last_successful_send + abort_cutoff < Instant::now() {
                    // the queue has been full for too long, abort
                    info!("aborting database query due to slow client");
                    metrics.query_aborts.with_label_values(&["slowclient"]).inc();
                    metrics
                        .query_aborts
                        .with_label_values(&["slowclient"])
                        .inc();
                    return Ok(());
                }
                // give the queue a chance to clear before trying again
@@ -406,7 +456,7 @@ ON CONFLICT (id) DO NOTHING"#,
    }

    async fn create_verification_record(&self, event_id: &str, name: &str) -> Result<()> {
        let mut tx = self.conn.begin().await?;
        let mut tx = self.conn_write.begin().await?;

        sqlx::query("DELETE FROM user_verification WHERE \"name\" = $1")
            .bind(name)
@@ -429,12 +479,10 @@ ON CONFLICT (id) DO NOTHING"#,
        let verify_time = now_jitter(600);

        // update verification time and reset any failure count
        sqlx::query(
            "UPDATE user_verification SET verified_at = $1, fail_count = 0 WHERE id = $2",
        )
        sqlx::query("UPDATE user_verification SET verified_at = $1, fail_count = 0 WHERE id = $2")
            .bind(Utc.timestamp_opt(verify_time as i64, 0).unwrap())
            .bind(id as i64)
            .execute(&self.conn)
            .execute(&self.conn_write)
            .await?;

        info!("verification updated for {}", id);
@@ -444,7 +492,7 @@ ON CONFLICT (id) DO NOTHING"#,
    async fn fail_verification(&self, id: u64) -> Result<()> {
        sqlx::query("UPDATE user_verification SET failed_at = now(), fail_count = fail_count + 1 WHERE id = $1")
            .bind(id as i64)
            .execute(&self.conn)
            .execute(&self.conn_write)
            .await?;
        Ok(())
    }
@@ -452,7 +500,7 @@ ON CONFLICT (id) DO NOTHING"#,
    async fn delete_verification(&self, id: u64) -> Result<()> {
        sqlx::query("DELETE FROM user_verification WHERE id = $1")
            .bind(id as i64)
            .execute(&self.conn)
            .execute(&self.conn_write)
            .await?;
        Ok(())
    }
@@ -501,6 +549,172 @@ ON CONFLICT (id) DO NOTHING"#,
            .await?
            .ok_or(error::Error::SqlxError(RowNotFound))
    }

    async fn create_account(&self, pub_key: &Keys) -> Result<bool> {
        let pub_key = pub_key.public_key().to_string();
        let mut tx = self.conn_write.begin().await?;

        let result = sqlx::query("INSERT INTO account (pubkey, balance) VALUES ($1, 0);")
            .bind(pub_key)
            .execute(&mut tx)
            .await;

        let success = match result {
            Ok(res) => {
                tx.commit().await?;
                res.rows_affected() == 1
            }
            Err(_err) => false,
        };

        Ok(success)
    }

    /// Admit account
    async fn admit_account(&self, pub_key: &Keys, admission_cost: u64) -> Result<()> {
        let pub_key = pub_key.public_key().to_string();
        sqlx::query(
            "UPDATE account SET is_admitted = TRUE, balance = balance - $1 WHERE pubkey = $2",
        )
        .bind(admission_cost as i64)
        .bind(pub_key)
        .execute(&self.conn_write)
        .await?;
        Ok(())
    }

    /// Gets if the account is admitted and balance
    async fn get_account_balance(&self, pub_key: &Keys) -> Result<(bool, u64)> {
        let pub_key = pub_key.public_key().to_string();
        let query = r#"SELECT
            is_admitted,
            balance
            FROM account
            WHERE pubkey = $1
            LIMIT 1"#;

        let result = sqlx::query_as::<_, (bool, i64)>(query)
            .bind(pub_key)
            .fetch_optional(&self.conn_write)
            .await?
            .ok_or(error::Error::SqlxError(RowNotFound))?;

        Ok((result.0, result.1 as u64))
    }

    /// Update account balance
    async fn update_account_balance(
        &self,
        pub_key: &Keys,
        positive: bool,
        new_balance: u64,
    ) -> Result<()> {
        let pub_key = pub_key.public_key().to_string();
        match positive {
            true => {
                sqlx::query("UPDATE account SET balance = balance + $1 WHERE pubkey = $2")
                    .bind(new_balance as i64)
                    .bind(pub_key)
                    .execute(&self.conn_write)
                    .await?
            }
            false => {
                sqlx::query("UPDATE account SET balance = balance - $1 WHERE pubkey = $2")
                    .bind(new_balance as i64)
                    .bind(pub_key)
                    .execute(&self.conn_write)
                    .await?
            }
        };
        Ok(())
    }

    /// Create invoice record
    async fn create_invoice_record(&self, pub_key: &Keys, invoice_info: InvoiceInfo) -> Result<()> {
        let pub_key = pub_key.public_key().to_string();
        let mut tx = self.conn_write.begin().await?;

        sqlx::query(
            "INSERT INTO invoice (pubkey, payment_hash, amount, status, description, created_at, invoice) VALUES ($1, $2, $3, $4, $5, now(), $6)",
        )
        .bind(pub_key)
        .bind(invoice_info.payment_hash)
        .bind(invoice_info.amount as i64)
        .bind(invoice_info.status)
        .bind(invoice_info.memo)
        .bind(invoice_info.bolt11)
        .execute(&mut tx)
        .await.unwrap();

        debug!("Invoice added");

        tx.commit().await?;
        Ok(())
    }

    /// Update invoice record
    async fn update_invoice(&self, payment_hash: &str, status: InvoiceStatus) -> Result<String> {
        debug!("Payment Hash: {}", payment_hash);
        let query = "SELECT pubkey, status, amount FROM invoice WHERE payment_hash=$1;";
        let (pubkey, prev_invoice_status, amount) =
            sqlx::query_as::<_, (String, InvoiceStatus, i64)>(query)
                .bind(payment_hash)
                .fetch_optional(&self.conn_write)
                .await?
                .ok_or(error::Error::SqlxError(RowNotFound))?;

        // If the invoice is paid update the confirmed at timestamp
        let query = if status.eq(&InvoiceStatus::Paid) {
            "UPDATE invoice SET status=$1, confirmed_at = now() WHERE payment_hash=$2;"
        } else {
            "UPDATE invoice SET status=$1 WHERE payment_hash=$2;"
        };

        sqlx::query(query)
            .bind(&status)
            .bind(payment_hash)
            .execute(&self.conn_write)
            .await?;

        if prev_invoice_status.eq(&InvoiceStatus::Unpaid) && status.eq(&InvoiceStatus::Paid) {
            sqlx::query("UPDATE account SET balance = balance + $1 WHERE pubkey = $2")
                .bind(amount)
                .bind(&pubkey)
                .execute(&self.conn_write)
                .await?;
        }

        Ok(pubkey)
    }

    /// Get the most recent invoice for a given pubkey;
    /// invoice must be unpaid and not expired
    async fn get_unpaid_invoice(&self, pubkey: &Keys) -> Result<Option<InvoiceInfo>> {
        let query = r#"
            SELECT amount, payment_hash, description, invoice
            FROM invoice
            WHERE pubkey = $1
            ORDER BY created_at DESC
            LIMIT 1;
        "#;
        match sqlx::query_as::<_, (i64, String, String, String)>(query)
            .bind(pubkey.public_key().to_string())
            .fetch_optional(&self.conn_write)
            .await
            .unwrap()
        {
            Some((amount, payment_hash, description, invoice)) => Ok(Some(InvoiceInfo {
                pubkey: pubkey.public_key().to_string(),
                payment_hash,
                bolt11: invoice,
                amount: amount as u64,
                status: InvoiceStatus::Unpaid,
                memo: description,
                confirmed_at: None,
            })),
            None => Ok(None),
        }
    }
}

/// Create a dynamic SQL query and params from a subscription filter.
@@ -512,6 +726,7 @@ fn query_from_filter(f: &ReqFilter) -> Option<QueryBuilder<Postgres>> {

    let mut query = QueryBuilder::new("SELECT e.\"content\", e.created_at FROM \"event\" e WHERE ");

    // This tracks whether we need to push a prefix AND before adding another clause
    let mut push_and = false;
    // Query for "authors", allowing prefix matches
    if let Some(auth_vec) = &f.authors {
@@ -688,7 +903,7 @@ fn query_from_filter(f: &ReqFilter) -> Option<QueryBuilder<Postgres>> {
        }
        push_and = true;
        query
            .push("e.created_at > ")
            .push("e.created_at >= ")
            .push_bind(Utc.timestamp_opt(f.since.unwrap() as i64, 0).unwrap());
    }

@@ -699,7 +914,7 @@ fn query_from_filter(f: &ReqFilter) -> Option<QueryBuilder<Postgres>> {
        }
        push_and = true;
        query
            .push("e.created_at < ")
            .push("e.created_at <= ")
            .push_bind(Utc.timestamp_opt(f.until.unwrap() as i64, 0).unwrap());
    }

@@ -709,6 +924,11 @@ fn query_from_filter(f: &ReqFilter) -> Option<QueryBuilder<Postgres>> {
    } else {
        query.push("e.hidden != 1::bit(1)");
    }
    // never display expired events
    query
        .push(" AND (e.expires_at IS NULL OR e.expires_at > ")
        .push_bind(Utc.timestamp_opt(utils::unix_time() as i64, 0).unwrap())
        .push(")");

    // Apply per-filter limit to this query.
    // The use of a LIMIT implies a DESC order, to capture only the most recent events.
@@ -724,15 +944,17 @@ fn query_from_filter(f: &ReqFilter) -> Option<QueryBuilder<Postgres>> {

impl FromRow<'_, PgRow> for VerificationRecord {
    fn from_row(row: &'_ PgRow) -> std::result::Result<Self, Error> {
        let name =
            Nip05Name::try_from(row.get::<'_, &str, &str>("name")).or(Err(RowNotFound))?;
        let name = Nip05Name::try_from(row.get::<'_, &str, &str>("name")).or(Err(RowNotFound))?;
        Ok(VerificationRecord {
            rowid: row.get::<'_, i64, &str>("id") as u64,
            name,
            address: hex::encode(row.get::<'_, Vec<u8>, &str>("pub_key")),
            event: hex::encode(row.get::<'_, Vec<u8>, &str>("event_id")),
            event_created: row.get::<'_, DateTime<Utc>, &str>("created_at").timestamp() as u64,
            last_success: None,
            last_success: match row.try_get::<'_, DateTime<Utc>, &str>("verified_at") {
                Ok(x) => Some(x.timestamp() as u64),
                _ => None,
            },
            last_failure: match row.try_get::<'_, DateTime<Utc>, &str>("failed_at") {
                Ok(x) => Some(x.timestamp() as u64),
                _ => None,
src/repo/postgres_migration.rs
@@ -34,11 +34,17 @@ pub async fn run_migrations(db: &PostgresPool) -> crate::error::Result<usize> {
    if m002_result == MigrationResult::Upgraded {
        m002::rebuild_tags(db).await?;
    }
    run_migration(m003::migration(), db).await;
    run_migration(m004::migration(), db).await;
    run_migration(m005::migration(), db).await;
    Ok(current_version(db).await as usize)
}

async fn current_version(db: &PostgresPool) -> i64 {
    sqlx::query_scalar("SELECT max(serial_number) FROM migrations;").fetch_one(db).await.unwrap()
    sqlx::query_scalar("SELECT max(serial_number) FROM migrations;")
        .fetch_one(db)
        .await
        .unwrap()
}

async fn prepare_migrations_table(db: &PostgresPool) {
@@ -77,7 +83,7 @@ async fn run_migration(migration: impl Migration, db: &PostgresPool) -> Migratio
        .unwrap();

    transaction.commit().await.unwrap();
    return MigrationResult::Upgraded;
    MigrationResult::Upgraded
}

mod m001 {
@@ -117,7 +123,7 @@ CREATE TABLE "tag" (
CREATE INDEX tag_event_id_idx ON tag USING btree (event_id, name);
CREATE INDEX tag_value_idx ON tag USING btree (value);

-- NIP-05 Verfication table
-- NIP-05 Verification table
CREATE TABLE "user_verification" (
    id int8 NOT NULL GENERATED BY DEFAULT AS IDENTITY,
    event_id bytea NOT NULL,
@@ -137,15 +143,15 @@ CREATE INDEX user_verification_name_idx ON user_verification USING btree (name);
}

mod m002 {
    use async_std::stream::StreamExt;
    use indicatif::{ProgressBar, ProgressStyle};
    use sqlx::Row;
    use std::time::Instant;
    use tracing::info;
    use async_std::stream::StreamExt;
    use sqlx::Row;
    use indicatif::{ProgressBar, ProgressStyle};

    use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
    use crate::event::{single_char_tagname, Event};
    use crate::repo::postgres::PostgresPool;
    use crate::event::{Event, single_char_tagname};
    use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
    use crate::utils::is_lower_hex;

    pub const VERSION: i64 = 2;
@@ -172,23 +178,31 @@ CREATE INDEX tag_value_hex_idx ON tag USING btree (value_hex);
        let mut tx = db.begin().await.unwrap();
        let mut update_tx = db.begin().await.unwrap();
        // Clear out table
        sqlx::query("DELETE FROM tag;").execute(&mut update_tx).await?;
        sqlx::query("DELETE FROM tag;")
            .execute(&mut update_tx)
            .await?;
        {
            let event_count: i64 =
                sqlx::query_scalar("SELECT COUNT(*) from event;")
            let event_count: i64 = sqlx::query_scalar("SELECT COUNT(*) from event;")
                .fetch_one(&mut tx)
                .await
                .unwrap();
            let bar = ProgressBar::new(event_count.try_into().unwrap()).with_message("rebuilding tags table");
            bar.set_style(ProgressStyle::with_template("[{elapsed_precise}] {bar:40.white/blue} {pos:>7}/{len:7} [{percent}%] {msg}").unwrap());
            let mut events = sqlx::query("SELECT id, content FROM event ORDER BY id;").fetch(&mut tx);
            let bar = ProgressBar::new(event_count.try_into().unwrap())
                .with_message("rebuilding tags table");
            bar.set_style(
                ProgressStyle::with_template(
                    "[{elapsed_precise}] {bar:40.white/blue} {pos:>7}/{len:7} [{percent}%] {msg}",
                )
                .unwrap(),
            );
            let mut events =
                sqlx::query("SELECT id, content FROM event ORDER BY id;").fetch(&mut tx);
            while let Some(row) = events.next().await {
                bar.inc(1);
                // get the row id and content
                let row = row.unwrap();
                let event_id: Vec<u8> = row.get(0);
                let event_bytes: Vec<u8> = row.get(1);
                let event:Event = serde_json::from_str(&String::from_utf8(event_bytes).unwrap())?;
                let event: Event = serde_json::from_str(&String::from_utf8(event_bytes).unwrap())?;

                for t in event.tags.iter().filter(|x| x.len() > 1) {
                    let tagname = t.get(0).unwrap();
@@ -201,13 +215,22 @@ CREATE INDEX tag_value_hex_idx ON tag USING btree (value_hex);
                    // insert as BLOB if we can restore it losslessly.
                    // this means it needs to be even length and lowercase.
                    if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
                        let q = "INSERT INTO tag (event_id, \"name\", value_hex) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING;";
                        sqlx::query(q).bind(&event_id).bind(&tagname).bind(hex::decode(tagval).ok()).execute(&mut update_tx).await?;
                        let q = "INSERT INTO tag (event_id, \"name\", value, value_hex) VALUES ($1, $2, NULL, $3) ON CONFLICT DO NOTHING;";
                        sqlx::query(q)
                            .bind(&event_id)
                            .bind(tagname)
                            .bind(hex::decode(tagval).ok())
                            .execute(&mut update_tx)
                            .await?;
                    } else {
                        let q = "INSERT INTO tag (event_id, \"name\", value) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING;";
                        sqlx::query(q).bind(&event_id).bind(&tagname).bind(tagval.as_bytes()).execute(&mut update_tx).await?;
                        let q = "INSERT INTO tag (event_id, \"name\", value, value_hex) VALUES ($1, $2, $3, NULL) ON CONFLICT DO NOTHING;";
                        sqlx::query(q)
                            .bind(&event_id)
                            .bind(tagname)
                            .bind(tagval.as_bytes())
                            .execute(&mut update_tx)
                            .await?;
                    }

                }
            }
            update_tx.commit().await?;
@@ -217,3 +240,81 @@ CREATE INDEX tag_value_hex_idx ON tag USING btree (value_hex);
        Ok(())
    }
}

mod m003 {
    use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};

    pub const VERSION: i64 = 3;

    pub fn migration() -> impl Migration {
        SimpleSqlMigration {
            serial_number: VERSION,
            sql: vec![
                r#"
-- Add unique constraint on tag
ALTER TABLE tag ADD CONSTRAINT unique_constraint_name UNIQUE (event_id, "name", value, value_hex);
"#,
            ],
        }
    }
}

mod m004 {
    use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};

    pub const VERSION: i64 = 4;

    pub fn migration() -> impl Migration {
        SimpleSqlMigration {
            serial_number: VERSION,
            sql: vec![
                r#"
-- Add expiration time for events
ALTER TABLE event ADD COLUMN expires_at timestamp(0) with time zone;
-- Index expiration time
CREATE INDEX event_expires_at_idx ON "event" (expires_at);
"#,
            ],
        }
    }
}

mod m005 {
    use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};

    pub const VERSION: i64 = 5;

    pub fn migration() -> impl Migration {
        SimpleSqlMigration {
            serial_number: VERSION,
            sql: vec![
                r#"
-- Create account table
CREATE TABLE "account" (
    pubkey varchar NOT NULL,
    is_admitted BOOLEAN NOT NULL DEFAULT FALSE,
    balance BIGINT NOT NULL DEFAULT 0,
    tos_accepted_at TIMESTAMP,
    CONSTRAINT account_pkey PRIMARY KEY (pubkey)
);

CREATE TYPE status AS ENUM ('Paid', 'Unpaid', 'Expired');

CREATE TABLE "invoice" (
    payment_hash varchar NOT NULL,
    pubkey varchar NOT NULL,
    invoice varchar NOT NULL,
    amount BIGINT NOT NULL,
    status status NOT NULL DEFAULT 'Unpaid',
    description varchar,
    created_at timestamp,
    confirmed_at timestamp,
    CONSTRAINT invoice_payment_hash PRIMARY KEY (payment_hash),
    CONSTRAINT invoice_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES account (pubkey) ON DELETE CASCADE
);
"#,
            ],
        }
    }
}
|
||||
|
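As an aside, the even-length lowercase-hex test in the tag rebuild above is exactly what makes the BLOB storage lossless: hex::decode accepts mixed case but hex::encode always emits lowercase, so only even-length, lowercase values survive a decode/encode round trip unchanged. A minimal sketch of that invariant, not code from this repository (the is_lower_hex here is a stand-in for the crate's own helper):

    /// Stand-in for the relay's `is_lower_hex`: true if every byte is 0-9 or a-f.
    fn is_lower_hex(s: &str) -> bool {
        !s.is_empty() && s.bytes().all(|b| b.is_ascii_digit() || (b'a'..=b'f').contains(&b))
    }

    /// A tag value may be stored as a BLOB only if decoding and re-encoding
    /// reproduces the original string exactly.
    fn survives_roundtrip(v: &str) -> bool {
        match hex::decode(v) {
            Ok(bytes) => hex::encode(bytes) == v,
            Err(_) => false,
        }
    }

    fn main() {
        assert!((("1a2b".len() % 2 == 0) && is_lower_hex("1a2b")) == survives_roundtrip("1a2b"));
        assert!(!survives_roundtrip("1A2B")); // uppercase re-encodes as lowercase
        assert!(!survives_roundtrip("abc")); // odd length cannot decode
    }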
@@ -1,34 +1,36 @@
//! Event persistence and querying
//use crate::config::SETTINGS;
use crate::config::Settings;
use crate::error::Result;
use crate::db::QueryResult;
use crate::error::{Error::SqlError, Result};
use crate::event::{single_char_tagname, Event};
use crate::hexrange::hex_range;
use crate::hexrange::HexSearch;
use crate::repo::sqlite_migration::{STARTUP_SQL,upgrade_db};
use crate::utils::{is_hex, is_lower_hex};
use crate::nip05::{Nip05Name, VerificationRecord};
use crate::subscription::{ReqFilter, Subscription};
use crate::payment::{InvoiceInfo, InvoiceStatus};
use crate::repo::sqlite_migration::{upgrade_db, STARTUP_SQL};
use crate::server::NostrMetrics;
use crate::subscription::{ReqFilter, Subscription};
use crate::utils::{is_hex, unix_time};
use async_trait::async_trait;
use hex;
use r2d2;
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::params;
use rusqlite::types::ToSql;
use rusqlite::OpenFlags;
use tokio::sync::{Mutex, MutexGuard, Semaphore};
use std::fmt::Write as _;
use std::path::Path;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use std::time::Instant;
use tokio::sync::{Mutex, MutexGuard, Semaphore};
use tokio::task;
use tracing::{debug, info, trace, warn};
use async_trait::async_trait;
use crate::db::QueryResult;

use crate::repo::{now_jitter, NostrRepo};
use nostr::key::Keys;

pub type SqlitePool = r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>;
pub type PooledConnection = r2d2::PooledConnection<r2d2_sqlite::SqliteConnectionManager>;
@@ -54,12 +56,13 @@ pub struct SqliteRepo {

impl SqliteRepo {
    // build all the pools needed
    #[must_use] pub fn new(settings: &Settings, metrics: NostrMetrics) -> SqliteRepo {
    #[must_use]
    pub fn new(settings: &Settings, metrics: NostrMetrics) -> SqliteRepo {
        let write_pool = build_pool(
            "writer",
            settings,
            OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
            1,
            0,
            2,
            false,
        );
@@ -67,7 +70,7 @@ impl SqliteRepo {
            "maintenance",
            settings,
            OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
            1,
            0,
            2,
            true,
        );
@@ -110,7 +113,8 @@ impl SqliteRepo {
        // get relevant fields from event and convert to blobs.
        let id_blob = hex::decode(&e.id).ok();
        let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok();
        let delegator_blob: Option<Vec<u8>> = e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
        let delegator_blob: Option<Vec<u8>> =
            e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
        let event_str = serde_json::to_string(&e).ok();
        // check for replaceable events that would hide this one; we won't even attempt to insert these.
        if e.is_replaceable() {
@@ -123,27 +127,20 @@ impl SqliteRepo {
        }
        // check for parameterized replaceable events that would be hidden; don't insert these either.
        if let Some(d_tag) = e.distinct_param() {
            let repl_count;
            if is_lower_hex(&d_tag) && (d_tag.len() % 2 == 0) {
                repl_count = tx.query_row(
                    "SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.author=? AND e.kind=? AND t.name='d' AND t.value_hex=? AND e.created_at >= ? LIMIT 1;",
                    params![pubkey_blob, e.kind, hex::decode(d_tag).ok(), e.created_at],|row| row.get::<usize, usize>(0));
            } else {
                repl_count = tx.query_row(
                    "SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.author=? AND e.kind=? AND t.name='d' AND t.value=? AND e.created_at >= ? LIMIT 1;",
                    params![pubkey_blob, e.kind, d_tag, e.created_at],|row| row.get::<usize, usize>(0));
            }
            let repl_count = tx.query_row(
                "SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.author=? AND e.kind=? AND t.name='d' AND t.value=? AND e.created_at >= ? LIMIT 1;",
                params![pubkey_blob, e.kind, d_tag, e.created_at],|row| row.get::<usize, usize>(0));
            // if any rows were returned, then some newer event with
            // the same author/kind/tag value exist, and we can ignore
            // this event.
            if repl_count.ok().is_some() {
                return Ok(0)
                return Ok(0);
            }
        }
        // ignore if the event hash is a duplicate.
        let mut ins_count = tx.execute(
            "INSERT OR IGNORE INTO event (event_hash, created_at, kind, author, delegated_by, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, ?6, strftime('%s','now'), FALSE);",
            params![id_blob, e.created_at, e.kind, pubkey_blob, delegator_blob, event_str]
            "INSERT OR IGNORE INTO event (event_hash, created_at, expires_at, kind, author, delegated_by, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, strftime('%s','now'), FALSE);",
            params![id_blob, e.created_at, e.expiration(), e.kind, pubkey_blob, delegator_blob, event_str]
        )? as u64;
        if ins_count == 0 {
            // if the event was a duplicate, no need to insert event or
@@ -163,18 +160,10 @@ impl SqliteRepo {
            let tagchar_opt = single_char_tagname(tagname);
            match &tagchar_opt {
                Some(_) => {
                    // if tagvalue is lowercase hex;
                    if is_lower_hex(tagval) && (tagval.len() % 2 == 0) {
                        tx.execute(
                            "INSERT OR IGNORE INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3)",
                            params![ev_id, &tagname, hex::decode(tagval).ok()],
                        )?;
                    } else {
                        tx.execute(
                            "INSERT OR IGNORE INTO tag (event_id, name, value) VALUES (?1, ?2, ?3)",
                            params![ev_id, &tagname, &tagval],
                        )?;
                    }
                    tx.execute(
                        "INSERT OR IGNORE INTO tag (event_id, name, value, kind, created_at) VALUES (?1, ?2, ?3, ?4, ?5)",
                        params![ev_id, &tagname, &tagval, e.kind, e.created_at],
                    )?;
                }
                None => {}
            }
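Only tags whose name is a single character (the indexable "e", "p", "d", ... tags) reach the INSERT above; everything else is skipped via single_char_tagname. A sketch of what that check plausibly does (illustrative stand-in; the real helper lives in crate::event and may differ in detail):

    /// Illustrative: index a tag only when its name is one character.
    fn single_char_tagname(name: &str) -> Option<char> {
        let mut chars = name.chars();
        match (chars.next(), chars.next()) {
            (Some(c), None) => Some(c),
            _ => None,
        }
    }

    fn main() {
        assert_eq!(single_char_tagname("e"), Some('e'));
        assert_eq!(single_char_tagname("expiration"), None); // multi-char names are not indexed
    }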
@@ -201,16 +190,9 @@ impl SqliteRepo {
        }
        // if this event is parameterized replaceable, remove other events.
        if let Some(d_tag) = e.distinct_param() {
            let update_count;
            if is_lower_hex(&d_tag) && (d_tag.len() % 2 == 0) {
                update_count = tx.execute(
                    "DELETE FROM event WHERE kind=? AND author=? AND id NOT IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=? AND e.author=? AND t.name='d' AND t.value_hex=? ORDER BY created_at DESC LIMIT 1);",
                    params![e.kind, pubkey_blob, e.kind, pubkey_blob, hex::decode(d_tag).ok()])?;
            } else {
                update_count = tx.execute(
                    "DELETE FROM event WHERE kind=? AND author=? AND id NOT IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=? AND e.author=? AND t.name='d' AND t.value=? ORDER BY created_at DESC LIMIT 1);",
                    params![e.kind, pubkey_blob, e.kind, pubkey_blob, d_tag])?;
            }
            let update_count = tx.execute(
                "DELETE FROM event WHERE kind=? AND author=? AND id IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=? AND e.author=? AND t.name='d' AND t.value=? ORDER BY t.created_at DESC LIMIT -1 OFFSET 1);",
                params![e.kind, pubkey_blob, e.kind, pubkey_blob, d_tag])?;
            if update_count > 0 {
                info!(
                    "removed {} older parameterized replaceable kind {} events for author: {:?}",
@@ -245,8 +227,8 @@ impl SqliteRepo {
        // check if a deletion has already been recorded for this event.
        // Only relevant for non-deletion events
        let del_count = tx.query_row(
            "SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.author=? AND t.name='e' AND e.kind=5 AND t.value_hex=? LIMIT 1;",
            params![pubkey_blob, id_blob], |row| row.get::<usize, usize>(0));
            "SELECT e.id FROM event e WHERE e.author=? AND e.id IN (SELECT t.event_id FROM tag t WHERE t.name='e' AND t.kind=5 AND t.value=?) LIMIT 1;",
            params![pubkey_blob, e.id], |row| row.get::<usize, usize>(0));
        // check if the query returned a result, meaning we should
        // hide the current event
        if del_count.ok().is_some() {
@@ -271,21 +253,32 @@ impl SqliteRepo {

#[async_trait]
impl NostrRepo for SqliteRepo {

    async fn start(&self) -> Result<()> {
        db_checkpoint_task(self.maint_pool.clone(), Duration::from_secs(60), self.checkpoint_in_progress.clone()).await
        db_checkpoint_task(
            self.maint_pool.clone(),
            Duration::from_secs(60),
            self.write_in_progress.clone(),
            self.checkpoint_in_progress.clone(),
        )
        .await?;
        cleanup_expired(
            self.maint_pool.clone(),
            Duration::from_secs(600),
            self.write_in_progress.clone(),
        )
        .await
    }

    async fn migrate_up(&self) -> Result<usize> {
        let _write_guard = self.write_in_progress.lock().await;
        let mut conn = self.write_pool.get()?;
        task::spawn_blocking(move || {
            upgrade_db(&mut conn)
        }).await?
        task::spawn_blocking(move || upgrade_db(&mut conn)).await?
    }
    /// Persist event to database
    async fn write_event(&self, e: &Event) -> Result<u64> {
        let start = Instant::now();
        let max_write_attempts = 10;
        let mut attempts = 0;
        let _write_guard = self.write_in_progress.lock().await;
        // spawn a blocking thread
        //let mut conn = self.write_pool.get()?;
@@ -293,8 +286,32 @@ impl NostrRepo for SqliteRepo {
        let e = e.clone();
        let event_count = task::spawn_blocking(move || {
            let mut conn = pool.get()?;
            SqliteRepo::persist_event(&mut conn, &e)
        }).await?;
            // this could fail because the database was busy; try
            // multiple times before giving up.
            loop {
                attempts += 1;
                let wr = SqliteRepo::persist_event(&mut conn, &e);
                match wr {
                    Err(SqlError(rusqlite::Error::SqliteFailure(e, _))) => {
                        // this basically means that NIP-05 or another
                        // writer was using the database between us
                        // reading and promoting the connection to a
                        // write lock.
                        info!(
                            "event write failed, DB locked (attempt: {}); sqlite err: {}",
                            attempts, e.extended_code
                        );
                    }
                    _ => {
                        return wr;
                    }
                }
                if attempts >= max_write_attempts {
                    return wr;
                }
            }
        })
        .await?;
        self.metrics
            .write_events
            .observe(start.elapsed().as_secs_f64());
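The loop just added is a bounded-retry pattern: treat a locked database as transient and retry, surface anything else immediately. The same shape can be factored into a generic helper; this is a sketch of the pattern only, not code from this repository:

    /// Retry `op` up to `max_attempts` times while `is_transient` says the
    /// error is worth retrying; return the last result otherwise.
    fn retry<T, E>(
        max_attempts: u32,
        mut op: impl FnMut() -> Result<T, E>,
        is_transient: impl Fn(&E) -> bool,
    ) -> Result<T, E> {
        let mut attempts = 0;
        loop {
            attempts += 1;
            let r = op();
            match &r {
                Err(e) if is_transient(e) && attempts < max_attempts => continue,
                _ => return r,
            }
        }
    }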
@@ -319,9 +336,14 @@ impl NostrRepo for SqliteRepo {
        // thread pool waiting for queries to finish under high load.
        // Instead, don't bother spawning threads when they will just
        // block on a database connection.
        let sem = self.reader_threads_ready.clone().acquire_owned().await.unwrap();
        let self=self.clone();
        let metrics=self.metrics.clone();
        let sem = self
            .reader_threads_ready
            .clone()
            .acquire_owned()
            .await
            .unwrap();
        let self = self.clone();
        let metrics = self.metrics.clone();
        task::spawn_blocking(move || {
            {
                // if we are waiting on a checkpoint, stop until it is complete
@@ -346,7 +368,10 @@ impl NostrRepo for SqliteRepo {
            }
            // check before getting a DB connection if the client still wants the results
            if abandon_query_rx.try_recv().is_ok() {
                debug!("query cancelled by client (before execution) (cid: {}, sub: {:?})", client_id, sub.id);
                debug!(
                    "query cancelled by client (before execution) (cid: {}, sub: {:?})",
                    client_id, sub.id
                );
                return Ok(());
            }

@@ -359,14 +384,15 @@ impl NostrRepo for SqliteRepo {
            if let Ok(mut conn) = self.read_pool.get() {
                {
                    let pool_state = self.read_pool.state();
                    metrics.db_connections.set((pool_state.connections - pool_state.idle_connections).into());
                    metrics
                        .db_connections
                        .set((pool_state.connections - pool_state.idle_connections).into());
                }
                for filter in sub.filters.iter() {
                    let filter_start = Instant::now();
                    filter_count += 1;
                    let (q, p, idx) = query_from_filter(&filter);
                    let sql_gen_elapsed = start.elapsed();

                    let sql_gen_elapsed = filter_start.elapsed();
                    let (q, p, idx) = query_from_filter(filter);
                    if sql_gen_elapsed > Duration::from_millis(10) {
                        debug!("SQL (slow) generated in {:?}", filter_start.elapsed());
                    }
@@ -377,7 +403,7 @@ impl NostrRepo for SqliteRepo {
                    let mut last_successful_send = Instant::now();
                    // execute the query.
                    // make the actual SQL query (with parameters inserted) available
                    conn.trace(Some(|x| {trace!("SQL trace: {:?}", x)}));
                    conn.trace(Some(|x| trace!("SQL trace: {:?}", x)));
                    let mut stmt = conn.prepare_cached(&q)?;
                    let mut event_rows = stmt.query(rusqlite::params_from_iter(p))?;

@@ -395,7 +421,10 @@ impl NostrRepo for SqliteRepo {
                        if slow_first_event && client_id.starts_with('0') {
                            debug!(
                                "filter first result in {:?} (slow): {} (cid: {}, sub: {:?})",
                                first_event_elapsed, serde_json::to_string(&filter)?, client_id, sub.id
                                first_event_elapsed,
                                serde_json::to_string(&filter)?,
                                client_id,
                                sub.id
                            );
                        }
                        first_result = false;
@@ -405,8 +434,14 @@ impl NostrRepo for SqliteRepo {
                        {
                            if self.checkpoint_in_progress.try_lock().is_err() {
                                // lock was held, abort this query
                                debug!("query aborted due to checkpoint (cid: {}, sub: {:?})", client_id, sub.id);
                                metrics.query_aborts.with_label_values(&["checkpoint"]).inc();
                                debug!(
                                    "query aborted due to checkpoint (cid: {}, sub: {:?})",
                                    client_id, sub.id
                                );
                                metrics
                                    .query_aborts
                                    .with_label_values(&["checkpoint"])
                                    .inc();
                                return Ok(());
                            }
                        }
@@ -414,7 +449,10 @@ impl NostrRepo for SqliteRepo {

                        // check if this is still active; every 100 rows
                        if row_count % 100 == 0 && abandon_query_rx.try_recv().is_ok() {
                            debug!("query cancelled by client (cid: {}, sub: {:?})", client_id, sub.id);
                            debug!(
                                "query cancelled by client (cid: {}, sub: {:?})",
                                client_id, sub.id
                            );
                            return Ok(());
                        }
                        row_count += 1;
@@ -430,19 +468,31 @@ impl NostrRepo for SqliteRepo {
                                // the queue has been full for too long, abort
                                info!("aborting database query due to slow client (cid: {}, sub: {:?})",
                                      client_id, sub.id);
                                metrics.query_aborts.with_label_values(&["slowclient"]).inc();
                                metrics
                                    .query_aborts
                                    .with_label_values(&["slowclient"])
                                    .inc();
                                let ok: Result<()> = Ok(());
                                return ok;
                            }
                            // check if a checkpoint is trying to run, and abort
                            if self.checkpoint_in_progress.try_lock().is_err() {
                                // lock was held, abort this query
                                debug!("query aborted due to checkpoint (cid: {}, sub: {:?})", client_id, sub.id);
                                metrics.query_aborts.with_label_values(&["checkpoint"]).inc();
                                debug!(
                                    "query aborted due to checkpoint (cid: {}, sub: {:?})",
                                    client_id, sub.id
                                );
                                metrics
                                    .query_aborts
                                    .with_label_values(&["checkpoint"])
                                    .inc();
                                return Ok(());
                            }
                            // give the queue a chance to clear before trying again
                            debug!("query thread sleeping due to full query_tx (cid: {}, sub: {:?})", client_id, sub.id);
                            debug!(
                                "query thread sleeping due to full query_tx (cid: {}, sub: {:?})",
                                client_id, sub.id
                            );
                            thread::sleep(Duration::from_millis(500));
                        }
                        // TODO: we could use try_send, but we'd have to juggle
@@ -463,10 +513,12 @@ impl NostrRepo for SqliteRepo {
                    if filter_start.elapsed() > slow_cutoff && client_id.starts_with('0') {
                        debug!(
                            "query filter req (slow): {} (cid: {}, sub: {:?}, filter: {})",
                            serde_json::to_string(&filter)?, client_id, sub.id, filter_count
                            serde_json::to_string(&filter)?,
                            client_id,
                            sub.id,
                            filter_count
                        );
                    }

                }
            } else {
                warn!("Could not get a database connection for querying");
@@ -502,7 +554,8 @@ impl NostrRepo for SqliteRepo {
            let start = Instant::now();
            conn.execute_batch("PRAGMA optimize;").ok();
            info!("optimize ran in {:?}", start.elapsed());
        }).await?;
        })
        .await?;
        Ok(())
    }

@@ -511,6 +564,7 @@ impl NostrRepo for SqliteRepo {
        let e = hex::decode(event_id).ok();
        let n = name.to_owned();
        let mut conn = self.write_pool.get()?;
        let _write_guard = self.write_in_progress.lock().await;
        tokio::task::spawn_blocking(move || {
            let tx = conn.transaction()?;
            {
@@ -538,6 +592,7 @@ impl NostrRepo for SqliteRepo {
    /// Update verification timestamp
    async fn update_verification_timestamp(&self, id: u64) -> Result<()> {
        let mut conn = self.write_pool.get()?;
        let _write_guard = self.write_in_progress.lock().await;
        tokio::task::spawn_blocking(move || {
            // add some jitter to the verification to prevent everything from stacking up together.
            let verif_time = now_jitter(600);
@@ -553,13 +608,13 @@ impl NostrRepo for SqliteRepo {
            let ok: Result<()> = Ok(());
            ok
        })
        .await?

        .await?
    }

    /// Update verification record as failed
    async fn fail_verification(&self, id: u64) -> Result<()> {
        let mut conn = self.write_pool.get()?;
        let _write_guard = self.write_in_progress.lock().await;
        tokio::task::spawn_blocking(move || {
            // add some jitter to the verification to prevent everything from stacking up together.
            let fail_time = now_jitter(600);
@@ -579,6 +634,7 @@ impl NostrRepo for SqliteRepo {
    /// Delete verification record
    async fn delete_verification(&self, id: u64) -> Result<()> {
        let mut conn = self.write_pool.get()?;
        let _write_guard = self.write_in_progress.lock().await;
        tokio::task::spawn_blocking(move || {
            let tx = conn.transaction()?;
            {
@@ -590,7 +646,7 @@ impl NostrRepo for SqliteRepo {
            let ok: Result<()> = Ok(());
            ok
        })
        .await?
        .await?
    }

    /// Get the latest verification record for a given pubkey.
@@ -668,6 +724,209 @@ impl NostrRepo for SqliteRepo {
            Ok(vr)
        }).await?
    }

    /// Create account
    async fn create_account(&self, pub_key: &Keys) -> Result<bool> {
        let pub_key = pub_key.public_key().to_string();

        let mut conn = self.write_pool.get()?;
        let ins_count = tokio::task::spawn_blocking(move || {
            let tx = conn.transaction()?;
            let ins_count: u64;
            {
                // Ignore if user is already in db
                let query = "INSERT OR IGNORE INTO account (pubkey, is_admitted, balance) VALUES (?1, ?2, ?3);";
                let mut stmt = tx.prepare(query)?;
                ins_count = stmt.execute(params![&pub_key, false, 0])? as u64;
            }
            tx.commit()?;
            let ok: Result<u64> = Ok(ins_count);
            ok
        }).await??;

        if ins_count != 1 {
            return Ok(false);
        }

        Ok(true)
    }

    /// Admit account
    async fn admit_account(&self, pub_key: &Keys, admission_cost: u64) -> Result<()> {
        let pub_key = pub_key.public_key().to_string();
        let mut conn = self.write_pool.get()?;
        let pub_key = pub_key.to_owned();
        tokio::task::spawn_blocking(move || {
            let tx = conn.transaction()?;
            {
                let query = "UPDATE account SET is_admitted = TRUE, tos_accepted_at = strftime('%s','now'), balance = balance - ?1 WHERE pubkey=?2;";
                let mut stmt = tx.prepare(query)?;
                stmt.execute(params![admission_cost, pub_key])?;
            }
            tx.commit()?;
            let ok: Result<()> = Ok(());
            ok
        })
        .await?
    }

    /// Gets if the account is admitted and balance
    async fn get_account_balance(&self, pub_key: &Keys) -> Result<(bool, u64)> {
        let pub_key = pub_key.public_key().to_string();
        let mut conn = self.write_pool.get()?;
        let pub_key = pub_key.to_owned();
        tokio::task::spawn_blocking(move || {
            let tx = conn.transaction()?;
            let query = "SELECT is_admitted, balance FROM account WHERE pubkey = ?1;";
            let mut stmt = tx.prepare_cached(query)?;
            let fields = stmt.query_row(params![pub_key], |r| {
                let is_admitted: bool = r.get(0)?;
                let balance: u64 = r.get(1)?;
                // create a tuple since we can't throw non-rusqlite errors in this closure
                Ok((is_admitted, balance))
            })?;
            Ok(fields)
        })
        .await?
    }

    /// Update account balance
    async fn update_account_balance(
        &self,
        pub_key: &Keys,
        positive: bool,
        new_balance: u64,
    ) -> Result<()> {
        let pub_key = pub_key.public_key().to_string();

        let mut conn = self.write_pool.get()?;
        tokio::task::spawn_blocking(move || {
            let tx = conn.transaction()?;
            {
                let query = if positive {
                    "UPDATE account SET balance=balance + ?1 WHERE pubkey=?2"
                } else {
                    "UPDATE account SET balance=balance - ?1 WHERE pubkey=?2"
                };
                let mut stmt = tx.prepare(query)?;
                stmt.execute(params![new_balance, pub_key])?;
            }
            tx.commit()?;
            let ok: Result<()> = Ok(());
            ok
        })
        .await?
    }

    /// Create invoice record
    async fn create_invoice_record(&self, pub_key: &Keys, invoice_info: InvoiceInfo) -> Result<()> {
        let pub_key = pub_key.public_key().to_string();
        let pub_key = pub_key.to_owned();
        let mut conn = self.write_pool.get()?;
        tokio::task::spawn_blocking(move || {
            let tx = conn.transaction()?;
            {
                let query = "INSERT INTO invoice (pubkey, payment_hash, amount, status, description, created_at, invoice) VALUES (?1, ?2, ?3, ?4, ?5, strftime('%s','now'), ?6);";
                let mut stmt = tx.prepare(query)?;
                stmt.execute(params![&pub_key, invoice_info.payment_hash, invoice_info.amount, invoice_info.status.to_string(), invoice_info.memo, invoice_info.bolt11])?;
            }
            tx.commit()?;
            let ok: Result<()> = Ok(());
            ok
        }).await??;

        Ok(())
    }

    /// Update invoice record
    async fn update_invoice(&self, payment_hash: &str, status: InvoiceStatus) -> Result<String> {
        let mut conn = self.write_pool.get()?;
        let payment_hash = payment_hash.to_owned();

        tokio::task::spawn_blocking(move || {
            let tx = conn.transaction()?;
            let pubkey: String;
            {
                // Get required invoice info for given payment hash
                let query = "SELECT pubkey, status, amount FROM invoice WHERE payment_hash=?1;";
                let mut stmt = tx.prepare(query)?;
                let (pub_key, prev_status, amount) = stmt.query_row(params![payment_hash], |r| {
                    let pub_key: String = r.get(0)?;
                    let status: String = r.get(1)?;
                    let amount: u64 = r.get(2)?;

                    Ok((pub_key, status, amount))
                })?;

                // If the invoice is paid update the confirmed_at timestamp
                let query = if status.eq(&InvoiceStatus::Paid) {
                    "UPDATE invoice SET status=?1, confirmed_at = strftime('%s', 'now') WHERE payment_hash=?2;"
                } else {
                    "UPDATE invoice SET status=?1 WHERE payment_hash=?2;"
                };
                let mut stmt = tx.prepare(query)?;
                stmt.execute(params![status.to_string(), payment_hash])?;

                // Increase account balance by given invoice amount
                if prev_status == "Unpaid" && status.eq(&InvoiceStatus::Paid) {
                    let query =
                        "UPDATE account SET balance = balance + ?1 WHERE pubkey = ?2;";
                    let mut stmt = tx.prepare(query)?;
                    stmt.execute(params![amount, pub_key])?;
                }

                pubkey = pub_key;
            }

            tx.commit()?;
            let ok: Result<String> = Ok(pubkey);
            ok
        })
        .await?
    }

    /// Get the most recent invoice for a given pubkey
    /// invoice must be unpaid and not expired
    async fn get_unpaid_invoice(&self, pubkey: &Keys) -> Result<Option<InvoiceInfo>> {
        let mut conn = self.write_pool.get()?;

        let pubkey = pubkey.to_owned();
        let pubkey_str = pubkey.clone().public_key().to_string();
        let (payment_hash, invoice, amount, description) = tokio::task::spawn_blocking(move || {
            let tx = conn.transaction()?;

            let query = r#"
SELECT amount, payment_hash, description, invoice
FROM invoice
WHERE pubkey = ?1 AND status = 'Unpaid'
ORDER BY created_at DESC
LIMIT 1;
            "#;
            let mut stmt = tx.prepare(query).unwrap();
            stmt.query_row(params![&pubkey_str], |r| {
                let amount: u64 = r.get(0)?;
                let payment_hash: String = r.get(1)?;
                let description: String = r.get(2)?;
                let invoice: String = r.get(3)?;

                Ok((payment_hash, invoice, amount, description))
            })
        })
        .await??;

        Ok(Some(InvoiceInfo {
            pubkey: pubkey.public_key().to_string(),
            payment_hash,
            bolt11: invoice,
            amount,
            status: InvoiceStatus::Unpaid,
            memo: description,
            confirmed_at: None,
        }))
    }
}
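Taken together, these methods form the pay-to-relay admission flow: an account row is created unadmitted with a zero balance, an invoice is recorded as Unpaid, a payment flips the invoice to Paid exactly once (the prev_status == "Unpaid" guard keeps the balance credit idempotent), and admission then debits the admission cost. A hedged sketch of a caller driving that sequence, using the trait methods above; the repo, keys, invoice, and admission_cost values are illustrative, and InvoiceInfo: Clone is assumed:

    // Illustrative driver for the admission flow; not code from this repository.
    async fn admit_after_payment(
        repo: &impl NostrRepo,
        keys: &Keys,
        invoice: InvoiceInfo,
        admission_cost: u64,
    ) -> Result<()> {
        repo.create_account(keys).await?; // no-op if the account already exists
        repo.create_invoice_record(keys, invoice.clone()).await?;
        // ... later, when the payment processor reports the payment ...
        repo.update_invoice(&invoice.payment_hash, InvoiceStatus::Paid).await?; // credits the balance once
        let (_admitted, balance) = repo.get_account_balance(keys).await?;
        if balance >= admission_cost {
            repo.admit_account(keys, admission_cost).await?; // debits the admission cost
        }
        Ok(())
    }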
/// Decide if there is an index that should be used explicitly
@@ -678,14 +937,15 @@ fn override_index(f: &ReqFilter) -> Option<String> {
    // queries for multiple kinds default to kind_index, which is
    // significantly slower than kind_created_at_index.
    if let Some(ks) = &f.kinds {
        if f.ids.is_none() &&
            ks.len() > 1 &&
            f.since.is_none() &&
            f.until.is_none() &&
            f.tags.is_none() &&
            f.authors.is_none() {
            return Some("kind_created_at_index".into());
        }
        if f.ids.is_none()
            && ks.len() > 1
            && f.since.is_none()
            && f.until.is_none()
            && f.tags.is_none()
            && f.authors.is_none()
        {
            return Some("kind_created_at_index".into());
        }
    }
    // if there is an author, it is much better to force the authors index.
    if f.authors.is_some() {
@@ -718,10 +978,12 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<Stri
        return (empty_query, empty_params, None);
    }

    // check if the index needs to be overriden
    // check if the index needs to be overridden
    let idx_name = override_index(f);
    let idx_stmt = idx_name.as_ref().map_or_else(|| "".to_owned(), |i| format!("INDEXED BY {}",i));
    let mut query = format!("SELECT e.content FROM event e {}", idx_stmt);
    let idx_stmt = idx_name
        .as_ref()
        .map_or_else(|| "".to_owned(), |i| format!("INDEXED BY {i}"));
    let mut query = format!("SELECT e.content FROM event e {idx_stmt}");
    // query parameters for SQLite
    let mut params: Vec<Box<dyn ToSql>> = vec![];

@@ -738,9 +1000,7 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<Stri
                params.push(Box::new(ex));
            }
            Some(HexSearch::Range(lower, upper)) => {
                auth_searches.push(
                    "(author>? AND author<?)".to_owned(),
                );
                auth_searches.push("(author>? AND author<?)".to_owned());
                params.push(Box::new(lower));
                params.push(Box::new(upper));
            }
@@ -749,7 +1009,7 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<Stri
                params.push(Box::new(lower));
            }
            None => {
                info!("Could not parse hex range from author {:?}", auth);
                trace!("Could not parse hex range from author {:?}", auth);
            }
        }
    }
@@ -804,75 +1064,60 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<Stri
    if let Some(map) = &f.tags {
        for (key, val) in map.iter() {
            let mut str_vals: Vec<Box<dyn ToSql>> = vec![];
            let mut blob_vals: Vec<Box<dyn ToSql>> = vec![];
            for v in val {
                if (v.len() % 2 == 0) && is_lower_hex(v) {
                    if let Ok(h) = hex::decode(v) {
                        blob_vals.push(Box::new(h));
                    }
                } else {
                    str_vals.push(Box::new(v.clone()));
                }
                str_vals.push(Box::new(v.clone()));
            }
            // do not mix value and value_hex; this is a temporary special case.
            if str_vals.len() == 0 {
                // create clauses with "?" params for each tag value being searched
                let blob_clause = format!("value_hex IN ({})", repeat_vars(blob_vals.len()));
                // find evidence of the target tag name/value existing for this event.
                let tag_clause = format!(
                    "e.id IN (SELECT e.id FROM event e LEFT JOIN tag t on e.id=t.event_id WHERE hidden!=TRUE and (name=? AND {}))",
                    blob_clause
                );
                // add the tag name as the first parameter
                params.push(Box::new(key.to_string()));
                // add all tag values that are blobs as params
                params.append(&mut blob_vals);
                filter_components.push(tag_clause);
            } else if blob_vals.len() == 0 {
                // create clauses with "?" params for each tag value being searched
                let str_clause = format!("value IN ({})", repeat_vars(str_vals.len()));
                // find evidence of the target tag name/value existing for this event.
                let tag_clause = format!(
                    "e.id IN (SELECT e.id FROM event e LEFT JOIN tag t on e.id=t.event_id WHERE hidden!=TRUE and (name=? AND {}))",
                    str_clause
                );
                // add the tag name as the first parameter
                params.push(Box::new(key.to_string()));
                // add all tag values that are blobs as params
                params.append(&mut str_vals);
                filter_components.push(tag_clause);
            // create clauses with "?" params for each tag value being searched
            let str_clause = format!("AND value IN ({})", repeat_vars(str_vals.len()));
            // find evidence of the target tag name/value existing for this event.
            // Query for Kind/Since/Until additionally, to reduce the number of tags that come back.
            let kind_clause;
            if let Some(ks) = &f.kinds {
                // kind is number, no escaping needed
                let str_kinds: Vec<String> =
                    ks.iter().map(std::string::ToString::to_string).collect();
                kind_clause = format!("AND kind IN ({})", str_kinds.join(", "));
            } else {
                debug!("mixed string/blob query");
                // create clauses with "?" params for each tag value being searched
                let str_clause = format!("value IN ({})", repeat_vars(str_vals.len()));
                let blob_clause = format!("value_hex IN ({})", repeat_vars(blob_vals.len()));
                // find evidence of the target tag name/value existing for this event.
                let tag_clause = format!(
                    "e.id IN (SELECT e.id FROM event e LEFT JOIN tag t on e.id=t.event_id WHERE hidden!=TRUE and (name=? AND ({} OR {})))",
                    str_clause, blob_clause
                );
                // add the tag name as the first parameter
                params.push(Box::new(key.to_string()));
                // add all tag values that are plain strings as params
                params.append(&mut str_vals);
                // add all tag values that are blobs as params
                params.append(&mut blob_vals);
                filter_components.push(tag_clause);
            }
                kind_clause = String::new();
            };
            let since_clause = if f.since.is_some() {
                format!("AND created_at >= {}", f.since.unwrap())
            } else {
                String::new()
            };
            // Query for timestamp
            let until_clause = if f.until.is_some() {
                format!("AND created_at <= {}", f.until.unwrap())
            } else {
                String::new()
            };

            let tag_clause = format!(
                "e.id IN (SELECT t.event_id FROM tag t WHERE (name=? {str_clause} {kind_clause} {since_clause} {until_clause}))"
            );

            // add the tag name as the first parameter
            params.push(Box::new(key.to_string()));
            // add all tag values that are blobs as params
            params.append(&mut str_vals);
            filter_components.push(tag_clause);
        }
    }
    // Query for timestamp
    if f.since.is_some() {
        let created_clause = format!("created_at > {}", f.since.unwrap());
        let created_clause = format!("created_at >= {}", f.since.unwrap());
        filter_components.push(created_clause);
    }
    // Query for timestamp
    if f.until.is_some() {
        let until_clause = format!("created_at < {}", f.until.unwrap());
        let until_clause = format!("created_at <= {}", f.until.unwrap());
        filter_components.push(until_clause);
    }
    // never display hidden events
    query.push_str(" WHERE hidden!=TRUE");
    // never display expired events
    filter_components.push("(expires_at IS NULL OR expires_at > ?)".to_string());
    params.push(Box::new(unix_time()));
    // build filter component conditions
    if !filter_components.is_empty() {
        query.push_str(" AND ");
@@ -881,7 +1126,7 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<Stri
    // Apply per-filter limit to this subquery.
    // The use of a LIMIT implies a DESC order, to capture only the most recent events.
    if let Some(lim) = f.limit {
        let _ = write!(query, " ORDER BY e.created_at DESC LIMIT {}", lim);
        let _ = write!(query, " ORDER BY e.created_at DESC LIMIT {lim}");
    } else {
        query.push_str(" ORDER BY e.created_at ASC");
    }
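For a concrete sense of the output, a filter such as {"kinds":[1],"#t":["nostr"],"since":1000,"limit":10} should yield SQL of roughly this shape under the clause-building above. This is an illustrative sketch only; the actual string depends on the full filter and on index overrides:

    // Rough shape of the generated SQL; tag name, tag value, and the current
    // unix time (for the expiration check) are bound as '?' parameters.
    let _expected_shape = r#"
    SELECT e.content FROM event e
    WHERE hidden!=TRUE
      AND e.id IN (SELECT t.event_id FROM tag t
                   WHERE (name=? AND value IN (?) AND kind IN (1) AND created_at >= 1000))
      AND kind IN (1)
      AND created_at >= 1000
      AND (expires_at IS NULL OR expires_at > ?)
    ORDER BY e.created_at DESC LIMIT 10
    "#;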
@@ -908,10 +1153,10 @@ fn _query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>, Vec<Stri
    // encapsulate subqueries into select statements
    let subqueries_selects: Vec<String> = subqueries
        .iter()
        .map(|s| format!("SELECT distinct content, created_at FROM ({})", s))
        .map(|s| format!("SELECT distinct content, created_at FROM ({s})"))
        .collect();
    let query: String = subqueries_selects.join(" UNION ");
    (query, params,indexes)
    (query, params, indexes)
}

/// Build a database connection pool.
@@ -940,7 +1185,7 @@ pub fn build_pool(
        }
    }
    let manager = if settings.database.in_memory {
        SqliteConnectionManager::memory()
        SqliteConnectionManager::file("file::memory:?cache=shared")
            .with_flags(flags)
            .with_init(|c| c.execute_batch(STARTUP_SQL))
    } else {
@@ -952,9 +1197,15 @@ pub fn build_pool(
        .test_on_check_out(true) // no noticeable performance hit
        .min_idle(Some(min_size))
        .max_size(max_size)
        .idle_timeout(Some(Duration::from_secs(10)))
        .max_lifetime(Some(Duration::from_secs(30)))
        .build(manager)
        .unwrap();
    // retrieve a connection to ensure the startup statements run immediately
    {
        let _ = pool.get();
    }

    info!(
        "Built a connection pool {:?} (min={}, max={})",
        name, min_size, max_size
@@ -962,14 +1213,71 @@ pub fn build_pool(
    pool
}

/// Cleanup expired events on a regular basis
async fn cleanup_expired(
    pool: SqlitePool,
    frequency: Duration,
    write_in_progress: Arc<Mutex<u64>>,
) -> Result<()> {
    tokio::task::spawn(async move {
        loop {
            tokio::select! {
                _ = tokio::time::sleep(frequency) => {
                    if let Ok(mut conn) = pool.get() {
                        let mut _guard:Option<MutexGuard<u64>> = None;
                        // take a write lock to prevent event writes
                        // from proceeding while we are deleting
                        // events. This isn't necessary, but
                        // minimizes the chances of forcing event
                        // persistence to be retried.
                        _guard = Some(write_in_progress.lock().await);
                        let start = Instant::now();
                        let exp_res = tokio::task::spawn_blocking(move || {
                            delete_expired(&mut conn)
                        }).await;
                        match exp_res {
                            Ok(Ok(count)) => {
                                if count > 0 {
                                    info!("removed {} expired events in: {:?}", count, start.elapsed());
                                }
                            },
                            _ => {
                                // either the task or underlying query failed
                                info!("there was an error cleaning up expired events: {:?}", exp_res);
                            }
                        }
                    }
                }
            };
        }
    });
    Ok(())
}

/// Execute a query to delete all expired events
pub fn delete_expired(conn: &mut PooledConnection) -> Result<usize> {
    let tx = conn.transaction()?;
    let update_count = tx.execute(
        "DELETE FROM event WHERE expires_at <= ?",
        params![unix_time()],
    )?;
    tx.commit()?;
    Ok(update_count)
}
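The expires_at values deleted here were written at insert time from e.expiration(); under NIP-40 that comes from an ["expiration", "<unix-seconds>"] tag on the event. A sketch of how such a value can be derived from an event's tags (illustrative; the relay's real accessor is the Event::expiration method):

    /// Illustrative: pull a NIP-40 expiration timestamp out of an event's tags.
    fn expiration(tags: &[Vec<String>]) -> Option<u64> {
        tags.iter()
            .find(|t| t.len() > 1 && t[0] == "expiration")
            .and_then(|t| t[1].parse::<u64>().ok())
    }

    fn main() {
        let tags = vec![vec!["expiration".to_string(), "1700000000".to_string()]];
        assert_eq!(expiration(&tags), Some(1_700_000_000));
    }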
/// Perform database WAL checkpoint on a regular basis
pub async fn db_checkpoint_task(pool: SqlitePool, frequency: Duration, checkpoint_in_progress: Arc<Mutex<u64>>) -> Result<()> {
pub async fn db_checkpoint_task(
    pool: SqlitePool,
    frequency: Duration,
    write_in_progress: Arc<Mutex<u64>>,
    checkpoint_in_progress: Arc<Mutex<u64>>,
) -> Result<()> {
    // TODO; use acquire_many on the reader semaphore to stop them from interrupting this.
    tokio::task::spawn(async move {
        // WAL size in pages.
        let mut current_wal_size = 0;
        // WAL threshold for more aggressive checkpointing (10,000 pages, or about 40MB)
        let wal_threshold = 1000*10;
        let wal_threshold = 1000 * 10;
        // default threshold for the busy timer
        let busy_wait_default = Duration::from_secs(1);
        // if the WAL file is getting too big, switch to this
@@ -978,6 +1286,8 @@ pub async fn db_checkpoint_task(pool: SqlitePool, frequency: Duration, checkpoin
        tokio::select! {
            _ = tokio::time::sleep(frequency) => {
                if let Ok(mut conn) = pool.get() {
                    // block all other writers
                    let _write_guard = write_in_progress.lock().await;
                    let mut _guard:Option<MutexGuard<u64>> = None;
                    // the busy timer will block writers, so don't set
                    // this any higher than you want max latency for event
@@ -1037,7 +1347,6 @@ pub fn checkpoint_db(conn: &mut PooledConnection) -> Result<usize> {
    Ok(wal_size as usize)
}

/// Produce an arbitrary list of '?' parameters.
fn repeat_vars(count: usize) -> String {
    if count == 0 {
@@ -1071,7 +1380,6 @@ fn log_pool_stats(name: &str, pool: &SqlitePool) {
    );
}

/// Check if the pool is fully utilized
fn _pool_at_capacity(pool: &SqlitePool) -> bool {
    let state: r2d2::State = pool.state();
@@ -4,6 +4,7 @@ use crate::error::Result;
use crate::event::{single_char_tagname, Event};
use crate::utils::is_lower_hex;
use const_format::formatcp;
use indicatif::{ProgressBar, ProgressStyle};
use rusqlite::limits::Limit;
use rusqlite::params;
use rusqlite::Connection;
@@ -16,11 +17,13 @@ pub const STARTUP_SQL: &str = r##"
PRAGMA main.synchronous = NORMAL;
PRAGMA foreign_keys = ON;
PRAGMA journal_size_limit = 32768;
pragma mmap_size = 17179869184; -- cap mmap at 16GB
PRAGMA temp_store = 2; -- use memory, not temp files
PRAGMA main.cache_size = 20000; -- 80MB max cache size per conn
pragma mmap_size = 0; -- disable mmap (default)
"##;

/// Latest database version
pub const DB_VERSION: usize = 15;
pub const DB_VERSION: usize = 18;

/// Schema definition
const INIT_SQL: &str = formatcp!(
@@ -40,6 +43,7 @@ id INTEGER PRIMARY KEY,
event_hash BLOB NOT NULL, -- 4-byte hash
first_seen INTEGER NOT NULL, -- when the event was first seen (not authored!) (seconds since 1970)
created_at INTEGER NOT NULL, -- when the event was authored
expires_at INTEGER, -- when the event expires and may be deleted
author BLOB NOT NULL, -- author pubkey
delegated_by BLOB, -- delegator pubkey (NIP-26)
kind INTEGER NOT NULL, -- event kind
@@ -58,23 +62,27 @@ CREATE INDEX IF NOT EXISTS kind_author_index ON event(kind,author);
CREATE INDEX IF NOT EXISTS kind_created_at_index ON event(kind,created_at);
CREATE INDEX IF NOT EXISTS author_created_at_index ON event(author,created_at);
CREATE INDEX IF NOT EXISTS author_kind_index ON event(author,kind);
CREATE INDEX IF NOT EXISTS event_expiration ON event(expires_at);

-- Tag Table
-- Tag values are stored as either a BLOB (if they come in as a
-- hex-string), or TEXT otherwise.
-- This means that searches need to select the appropriate column.
-- We duplicate the kind/created_at to make indexes much more efficient.
CREATE TABLE IF NOT EXISTS tag (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
name TEXT, -- the tag name ("p", "e", whatever)
value TEXT, -- the tag value, if not hex.
value_hex BLOB, -- the tag value, if it can be interpreted as a lowercase hex string.
created_at INTEGER NOT NULL, -- when the event was authored
kind INTEGER NOT NULL, -- event kind
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
CREATE INDEX IF NOT EXISTS tag_val_hex_index ON tag(value_hex);
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value_hex,value);
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value_hex);
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value);
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value);
CREATE INDEX IF NOT EXISTS tag_covering_index ON tag(name,kind,value,created_at,event_id);

-- NIP-05 User Validation
CREATE TABLE IF NOT EXISTS user_verification (
@@ -88,6 +96,35 @@ FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CAS
);
CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name);
CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);

-- Create account table
CREATE TABLE IF NOT EXISTS account (
pubkey TEXT PRIMARY KEY,
is_admitted INTEGER NOT NULL DEFAULT 0,
balance INTEGER NOT NULL DEFAULT 0,
tos_accepted_at INTEGER
);

-- Create account index
CREATE INDEX IF NOT EXISTS user_pubkey_index ON account(pubkey);

-- Invoice table
CREATE TABLE IF NOT EXISTS invoice (
payment_hash TEXT PRIMARY KEY,
pubkey TEXT NOT NULL,
invoice TEXT NOT NULL,
amount INTEGER NOT NULL,
status TEXT CHECK ( status IN ('Paid', 'Unpaid', 'Expired' ) ) NOT NULL DEFAULT 'Unpaid',
description TEXT,
created_at INTEGER NOT NULL,
confirmed_at INTEGER,
CONSTRAINT invoice_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES account (pubkey) ON DELETE CASCADE
);

-- Create invoice index
CREATE INDEX IF NOT EXISTS invoice_pubkey_index ON invoice(pubkey);

"##,
DB_VERSION
);
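The denormalized kind and created_at columns exist so that tag_covering_index can answer the tag subquery from query_from_filter without touching the event table at all. One way to check that, offered as a debugging aid rather than project tooling (the database path is illustrative):

    // Debugging aid (not part of the relay): ask SQLite for the plan of the
    // tag subquery and look for the covering index in the output.
    use rusqlite::Connection;

    fn main() -> rusqlite::Result<()> {
        let conn = Connection::open("nostr.db")?; // illustrative path
        let mut stmt = conn.prepare(
            "EXPLAIN QUERY PLAN \
             SELECT t.event_id FROM tag t \
             WHERE (name=?1 AND value IN (?2) AND kind IN (1) AND created_at >= 1000)",
        )?;
        let mut rows = stmt.query(["t", "nostr"])?;
        while let Some(row) = rows.next()? {
            let detail: String = row.get(3)?; // expect "... USING COVERING INDEX tag_covering_index ..."
            println!("{detail}");
        }
        Ok(())
    }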
@@ -199,6 +236,15 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<usize> {
    if curr_version == 14 {
        curr_version = mig_14_to_15(conn)?;
    }
    if curr_version == 15 {
        curr_version = mig_15_to_16(conn)?;
    }
    if curr_version == 16 {
        curr_version = mig_16_to_17(conn)?;
    }
    if curr_version == 17 {
        curr_version = mig_17_to_18(conn)?;
    }

    if curr_version == DB_VERSION {
        info!(
@@ -209,13 +255,12 @@ pub fn upgrade_db(conn: &mut PooledConnection) -> Result<usize> {
    }
    // Database is current, all is good
    Ordering::Equal => {
        debug!("Database version was already current (v{})", DB_VERSION);
        debug!("Database version was already current (v{DB_VERSION})");
    }
    // Database is newer than what this code understands, abort
    Ordering::Greater => {
        panic!(
            "Database version is newer than supported by this executable (v{} > v{})",
            curr_version, DB_VERSION
            "Database version is newer than supported by this executable (v{curr_version} > v{DB_VERSION})",
        );
    }
}
@@ -240,8 +285,8 @@ pub fn rebuild_tags(conn: &mut PooledConnection) -> Result<()> {
    let mut stmt = tx.prepare("select id, content from event order by id;")?;
    let mut tag_rows = stmt.query([])?;
    while let Some(row) = tag_rows.next()? {
        if (events_processed as f32)/(count as f32) > percent_done {
            info!("Tag update {}% complete...", (100.0*percent_done).round());
        if (events_processed as f32) / (count as f32) > percent_done {
            info!("Tag update {}% complete...", (100.0 * percent_done).round());
            percent_done += update_each_percent;
        }
        // we want to capture the event_id that had the tag, the tag name, and the tag hex value.
@@ -280,8 +325,6 @@ pub fn rebuild_tags(conn: &mut PooledConnection) -> Result<()> {
    Ok(())
}

//// Migration Scripts

fn mig_1_to_2(conn: &mut PooledConnection) -> Result<usize> {
@@ -573,11 +616,17 @@ fn mig_11_to_12(conn: &mut PooledConnection) -> Result<usize> {
        tx.execute("PRAGMA user_version = 12;", [])?;
    }
    tx.commit()?;
    info!("database schema upgraded v11 -> v12 in {:?}", start.elapsed());
    info!(
        "database schema upgraded v11 -> v12 in {:?}",
        start.elapsed()
    );
    // vacuum after large table modification
    let start = Instant::now();
    conn.execute("VACUUM;", [])?;
    info!("vacuumed DB after hidden event cleanup in {:?}", start.elapsed());
    info!(
        "vacuumed DB after hidden event cleanup in {:?}",
        start.elapsed()
    );
    Ok(12)
}

@@ -643,7 +692,7 @@ PRAGMA user_version = 15;
    match conn.execute_batch(clear_hidden_sql) {
        Ok(()) => {
            info!("all hidden events removed");
        },
        }
        Err(err) => {
            error!("delete failed: {}", err);
            panic!("could not remove hidden events");
@@ -651,3 +700,142 @@ PRAGMA user_version = 15;
    }
    Ok(15)
}

fn mig_15_to_16(conn: &mut PooledConnection) -> Result<usize> {
    let count = db_event_count(conn)?;
    info!("database schema needs update from 15->16 (this may take a few minutes)");
    let upgrade_sql = r##"
DROP TABLE tag;
CREATE TABLE tag (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL, -- an event ID that contains a tag.
name TEXT, -- the tag name ("p", "e", whatever)
value TEXT, -- the tag value, if not hex.
created_at INTEGER NOT NULL, -- when the event was authored
kind INTEGER NOT NULL, -- event kind
FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value);
CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value);
CREATE INDEX IF NOT EXISTS tag_covering_index ON tag(name,kind,value,created_at,event_id);
"##;

    let start = Instant::now();
    let tx = conn.transaction()?;

    let bar = ProgressBar::new(count.try_into().unwrap()).with_message("rebuilding tags table");
    bar.set_style(
        ProgressStyle::with_template(
            "[{elapsed_precise}] {bar:40.white/blue} {pos:>7}/{len:7} [{percent}%] {msg}",
        )
        .unwrap(),
    );
    {
        tx.execute_batch(upgrade_sql)?;
        let mut stmt =
            tx.prepare("select id, kind, created_at, content from event order by id;")?;
        let mut tag_rows = stmt.query([])?;
        let mut count = 0;
        while let Some(row) = tag_rows.next()? {
            count += 1;
            if count % 10 == 0 {
                bar.inc(10);
            }
            let event_id: u64 = row.get(0)?;
            let kind: u64 = row.get(1)?;
            let created_at: u64 = row.get(2)?;
            let event_json: String = row.get(3)?;
            let event: Event = serde_json::from_str(&event_json)?;
            // look at each event, and each tag, creating new tag entries if appropriate.
            for t in event.tags.iter().filter(|x| x.len() > 1) {
                let tagname = t.get(0).unwrap();
                let tagnamechar_opt = single_char_tagname(tagname);
                if tagnamechar_opt.is_none() {
                    continue;
                }
                // safe because len was > 1
                let tagval = t.get(1).unwrap();
                // otherwise, insert as text
                tx.execute(
                    "INSERT INTO tag (event_id, name, value, kind, created_at) VALUES (?1, ?2, ?3, ?4, ?5);",
                    params![event_id, tagname, &tagval, kind, created_at],
                )?;
            }
        }
        tx.execute("PRAGMA user_version = 16;", [])?;
    }
    bar.finish();
    tx.commit()?;
    info!(
        "database schema upgraded v15 -> v16 in {:?}",
        start.elapsed()
    );
    Ok(16)
}

fn mig_16_to_17(conn: &mut PooledConnection) -> Result<usize> {
    info!("database schema needs update from 16->17");
    let upgrade_sql = r##"
ALTER TABLE event ADD COLUMN expires_at INTEGER;
CREATE INDEX IF NOT EXISTS event_expiration ON event(expires_at);
PRAGMA user_version = 17;
"##;
    match conn.execute_batch(upgrade_sql) {
        Ok(()) => {
            info!("database schema upgraded v16 -> v17");
        }
        Err(err) => {
            error!("update failed: {}", err);
            panic!("database could not be upgraded");
        }
    }
    Ok(17)
}

fn mig_17_to_18(conn: &mut PooledConnection) -> Result<usize> {
    info!("database schema needs update from 17->18");
    let upgrade_sql = r##"
-- Create invoices table
CREATE TABLE IF NOT EXISTS invoice (
payment_hash TEXT PRIMARY KEY,
pubkey TEXT NOT NULL,
invoice TEXT NOT NULL,
amount INTEGER NOT NULL,
status TEXT CHECK ( status IN ('Paid', 'Unpaid', 'Expired' ) ) NOT NULL DEFAULT 'Unpaid',
description TEXT,
created_at INTEGER NOT NULL,
confirmed_at INTEGER,
CONSTRAINT invoice_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES account (pubkey) ON DELETE CASCADE
);

-- Create invoice index
CREATE INDEX IF NOT EXISTS invoice_pubkey_index ON invoice(pubkey);

-- Create account table
CREATE TABLE IF NOT EXISTS account (
pubkey TEXT PRIMARY KEY,
is_admitted INTEGER NOT NULL DEFAULT 0,
balance INTEGER NOT NULL DEFAULT 0,
tos_accepted_at INTEGER
);

-- Create account index
CREATE INDEX IF NOT EXISTS account_pubkey_index ON account(pubkey);

pragma optimize;
PRAGMA user_version = 18;
"##;
    match conn.execute_batch(upgrade_sql) {
        Ok(()) => {
            info!("database schema upgraded v17 -> v18");
        }
        Err(err) => {
            error!("update failed: {}", err);
            panic!("database could not be upgraded");
        }
    }
    Ok(18)
}
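New schema versions slot into upgrade_db by appending one more link to the version chain and bumping DB_VERSION. A hypothetical v18 -> v19 migration following the same shape as mig_17_to_18 above would look like this (sketch only; nothing here exists in the repository):

    // Hypothetical next migration, mirroring the pattern above.
    fn mig_18_to_19(conn: &mut PooledConnection) -> Result<usize> {
        info!("database schema needs update from 18->19");
        let upgrade_sql = r##"
    -- (schema change would go here)
    PRAGMA user_version = 19;
    "##;
        conn.execute_batch(upgrade_sql)?;
        Ok(19)
    }

    // ...and the corresponding link in upgrade_db's chain:
    // if curr_version == 18 {
    //     curr_version = mig_18_to_19(conn)?;
    // }
    // (DB_VERSION would then be bumped to 19.)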
826 src/server.rs
File diff suppressed because it is too large
@@ -45,7 +45,8 @@ pub struct ReqFilter {
|
||||
|
||||
impl Serialize for ReqFilter {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where S:Serializer,
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let mut map = serializer.serialize_map(None)?;
|
||||
if let Some(ids) = &self.ids {
|
||||
@@ -68,9 +69,9 @@ impl Serialize for ReqFilter {
|
||||
}
|
||||
// serialize tags
|
||||
if let Some(tags) = &self.tags {
|
||||
for (k,v) in tags {
|
||||
let vals:Vec<&String> = v.iter().collect();
|
||||
map.serialize_entry(&format!("#{}",k), &vals)?;
|
||||
for (k, v) in tags {
|
||||
let vals: Vec<&String> = v.iter().collect();
|
||||
map.serialize_entry(&format!("#{k}"), &vals)?;
|
||||
}
|
||||
}
|
||||
map.end()
|
||||
@@ -105,15 +106,16 @@ impl<'de> Deserialize<'de> for ReqFilter {
|
||||
for (key, val) in filter {
|
||||
// ids
|
||||
if key == "ids" {
|
||||
let raw_ids: Option<Vec<String>>= Deserialize::deserialize(val).ok();
|
||||
let raw_ids: Option<Vec<String>> = Deserialize::deserialize(val).ok();
|
||||
if let Some(a) = raw_ids.as_ref() {
|
||||
if a.contains(&empty_string) {
|
||||
return Err(serde::de::Error::invalid_type(
|
||||
Unexpected::Other("prefix matches must not be empty strings"),
|
||||
&"a json object"));
|
||||
&"a json object",
|
||||
));
|
||||
}
|
||||
}
|
||||
rf.ids =raw_ids;
|
||||
rf.ids = raw_ids;
|
||||
} else if key == "kinds" {
|
||||
rf.kinds = Deserialize::deserialize(val).ok();
|
||||
} else if key == "since" {
|
||||
@@ -123,12 +125,13 @@ impl<'de> Deserialize<'de> for ReqFilter {
|
||||
} else if key == "limit" {
|
||||
rf.limit = Deserialize::deserialize(val).ok();
|
||||
} else if key == "authors" {
|
||||
let raw_authors: Option<Vec<String>>= Deserialize::deserialize(val).ok();
|
||||
let raw_authors: Option<Vec<String>> = Deserialize::deserialize(val).ok();
|
||||
if let Some(a) = raw_authors.as_ref() {
|
||||
if a.contains(&empty_string) {
|
||||
return Err(serde::de::Error::invalid_type(
|
||||
Unexpected::Other("prefix matches must not be empty strings"),
|
||||
&"a json object"));
|
||||
&"a json object",
|
||||
));
|
||||
}
|
||||
}
|
||||
rf.authors = raw_authors;
|
||||
@@ -185,7 +188,7 @@ impl<'de> Deserialize<'de> for Subscription {
        D: Deserializer<'de>,
    {
        let mut v: Value = Deserialize::deserialize(deserializer)?;
        // this shoud be a 3-or-more element array.
        // this should be a 3-or-more element array.
        // verify the first element is a String, REQ
        // get the subscription from the second element.
        // convert each of the remaining objects into filters
@@ -232,19 +235,22 @@ impl<'de> Deserialize<'de> for Subscription {

impl Subscription {
    /// Get a copy of the subscription identifier.
    #[must_use] pub fn get_id(&self) -> String {
    #[must_use]
    pub fn get_id(&self) -> String {
        self.id.clone()
    }

    /// Determine if any filter is requesting historical (database)
    /// queries. If every filter has limit:0, we do not need to query the DB.
    #[must_use] pub fn needs_historical_events(&self) -> bool {
        self.filters.iter().any(|f| f.limit!=Some(0))
    #[must_use]
    pub fn needs_historical_events(&self) -> bool {
        self.filters.iter().any(|f| f.limit != Some(0))
    }

    /// Determine if this subscription matches a given [`Event`]. Any
    /// individual filter match is sufficient.
    #[must_use] pub fn interested_in_event(&self, event: &Event) -> bool {
    #[must_use]
    pub fn interested_in_event(&self, event: &Event) -> bool {
        for f in &self.filters {
            if f.interested_in_event(event) {
                return true;
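As a side note on the `needs_historical_events` logic above: the subscription needs a database query unless every one of its filters carries limit:0. A minimal standalone sketch of that predicate (illustrative, not repo code):

fn needs_historical_events(limits: &[Option<u64>]) -> bool {
    // Any filter without limit:0 forces a historical (database) query.
    limits.iter().any(|l| *l != Some(0))
}

fn main() {
    assert!(!needs_historical_events(&[Some(0), Some(0)])); // live-only subscription
    assert!(needs_historical_events(&[Some(0), None]));     // second filter hits the DB
}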
@@ -305,17 +311,16 @@ impl ReqFilter {

    /// Check if this filter either matches, or does not care about the kind.
    fn kind_match(&self, kind: u64) -> bool {
        self.kinds
            .as_ref()
            .map_or(true, |ks| ks.contains(&kind))
        self.kinds.as_ref().map_or(true, |ks| ks.contains(&kind))
    }

    /// Determine if all populated fields in this filter match the provided event.
    #[must_use] pub fn interested_in_event(&self, event: &Event) -> bool {
    #[must_use]
    pub fn interested_in_event(&self, event: &Event) -> bool {
        // self.id.as_ref().map(|v| v == &event.id).unwrap_or(true)
        self.ids_match(event)
            && self.since.map_or(true, |t| event.created_at > t)
            && self.until.map_or(true, |t| event.created_at < t)
            && self.since.map_or(true, |t| event.created_at >= t)
            && self.until.map_or(true, |t| event.created_at <= t)
            && self.kind_match(event.kind)
            && (self.authors_match(event) || self.delegated_authors_match(event))
            && self.tag_match(event)
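The substantive change in this hunk makes `since` and `until` inclusive bounds, so an event stamped exactly at either limit now matches. A hedged standalone illustration of the new comparisons:

fn in_window(created_at: u64, since: Option<u64>, until: Option<u64>) -> bool {
    // Inclusive on both ends, matching the new >= / <= comparisons above.
    since.map_or(true, |t| created_at >= t) && until.map_or(true, |t| created_at <= t)
}

fn main() {
    assert!(in_window(10, Some(10), Some(20))); // boundary event: matched only after this change
    assert!(in_window(20, Some(10), Some(20)));
    assert!(!in_window(21, Some(10), Some(20)));
}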
@@ -625,7 +630,9 @@ mod tests {

    #[test]
    fn serialize_filter() -> Result<()> {
        let s: Subscription = serde_json::from_str(r##"["REQ","xyz",{"authors":["abc", "bcd"], "since": 10, "until": 20, "limit":100, "#e": ["foo", "bar"], "#d": ["test"]}]"##)?;
        let s: Subscription = serde_json::from_str(
            r##"["REQ","xyz",{"authors":["abc", "bcd"], "since": 10, "until": 20, "limit":100, "#e": ["foo", "bar"], "#d": ["test"]}]"##,
        )?;
        let f = s.filters.get(0);
        let serialized = serde_json::to_string(&f)?;
        let serialized_wrapped = format!(r##"["REQ", "xyz",{}]"##, serialized);
22
src/utils.rs
@@ -1,9 +1,11 @@
//! Common utility functions
use bech32::FromBase32;
use std::time::SystemTime;
use url::Url;

/// Seconds since 1970.
#[must_use] pub fn unix_time() -> u64 {
#[must_use]
pub fn unix_time() -> u64 {
    SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
        .map(|x| x.as_secs())
@@ -11,7 +13,8 @@ use std::time::SystemTime;
}

/// Check if a string contains only hex characters.
#[must_use] pub fn is_hex(s: &str) -> bool {
#[must_use]
pub fn is_hex(s: &str) -> bool {
    s.chars().all(|x| char::is_ascii_hexdigit(&x))
}
@@ -27,12 +30,19 @@ pub fn nip19_to_hex(s: &str) -> Result<String, bech32::Error> {
}

/// Check if a string contains only lower-case hex chars.
#[must_use] pub fn is_lower_hex(s: &str) -> bool {
#[must_use]
pub fn is_lower_hex(s: &str) -> bool {
    s.chars().all(|x| {
        (char::is_ascii_lowercase(&x) || char::is_ascii_digit(&x)) && char::is_ascii_hexdigit(&x)
    })
}

pub fn host_str(url: &str) -> Option<String> {
    Url::parse(url)
        .ok()
        .and_then(|u| u.host_str().map(|s| s.to_string()))
}

#[cfg(test)]
mod tests {
    use super::*;
@@ -40,15 +50,15 @@ mod tests {
    #[test]
    fn lower_hex() {
        let hexstr = "abcd0123";
        assert_eq!(is_lower_hex(hexstr), true);
        assert!(is_lower_hex(hexstr));
    }

    #[test]
    fn nip19() {
        let hexkey = "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d";
        let nip19key = "npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6";
        assert_eq!(is_nip19(hexkey), false);
        assert_eq!(is_nip19(nip19key), true);
        assert!(!is_nip19(hexkey));
        assert!(is_nip19(nip19key));
    }

    #[test]
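For readers unfamiliar with the conversion `nip19_to_hex` performs, here is a minimal sketch assuming a bech32 0.9-style API (the `FromBase32` import above suggests that family); the key pair in the example comes from the `nip19` test:

use bech32::FromBase32;

fn npub_to_hex(s: &str) -> Result<String, bech32::Error> {
    // Decode the bech32 string, then convert the 5-bit groups back to bytes.
    let (_hrp, data, _variant) = bech32::decode(s)?;
    let bytes = Vec::<u8>::from_base32(&data)?;
    Ok(bytes.iter().map(|b| format!("{b:02x}")).collect())
}

fn main() {
    let npub = "npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6";
    // Should print the hex key from the test above:
    // 3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d
    println!("{}", npub_to_hex(npub).unwrap());
}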
@@ -7,4 +7,4 @@ mod tests {
        use clap::CommandFactory;
        CLIArgs::command().debug_assert();
    }
}
}
356
tests/conn.rs
Normal file
@@ -0,0 +1,356 @@
#[cfg(test)]
mod tests {
    use bitcoin_hashes::hex::ToHex;
    use bitcoin_hashes::sha256;
    use bitcoin_hashes::Hash;
    use secp256k1::rand;
    use secp256k1::{KeyPair, Secp256k1, XOnlyPublicKey};

    use nostr_rs_relay::conn::ClientConn;
    use nostr_rs_relay::error::Error;
    use nostr_rs_relay::event::Event;
    use nostr_rs_relay::utils::unix_time;

    const RELAY: &str = "wss://nostr.example.com/";

    #[test]
    fn test_generate_auth_challenge() {
        let mut client_conn = ClientConn::new("127.0.0.1".into());

        assert_eq!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        client_conn.generate_auth_challenge();

        assert_ne!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        let last_auth_challenge = client_conn.auth_challenge().cloned();

        client_conn.generate_auth_challenge();

        assert_ne!(client_conn.auth_challenge(), None);
        assert_ne!(
            client_conn.auth_challenge().unwrap(),
            &last_auth_challenge.unwrap()
        );
        assert_eq!(client_conn.auth_pubkey(), None);
    }
    #[test]
    fn test_authenticate_with_valid_event() {
        let mut client_conn = ClientConn::new("127.0.0.1".into());

        assert_eq!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        client_conn.generate_auth_challenge();

        assert_ne!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        let challenge = client_conn.auth_challenge().unwrap();
        let event = auth_event(challenge);

        let result = client_conn.authenticate(&event, RELAY.into());

        assert!(matches!(result, Ok(())));
        assert_eq!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), Some(&event.pubkey));
    }

    #[test]
    fn test_fail_to_authenticate_in_invalid_state() {
        let mut client_conn = ClientConn::new("127.0.0.1".into());

        assert_eq!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        let event = auth_event(&"challenge".into());
        let result = client_conn.authenticate(&event, RELAY.into());

        assert!(matches!(result, Err(Error::AuthFailure)));
    }
    #[test]
    fn test_authenticate_when_already_authenticated() {
        let mut client_conn = ClientConn::new("127.0.0.1".into());

        assert_eq!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        client_conn.generate_auth_challenge();

        assert_ne!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        let challenge = client_conn.auth_challenge().unwrap().clone();

        let event = auth_event(&challenge);
        let result = client_conn.authenticate(&event, RELAY.into());

        assert!(matches!(result, Ok(())));
        assert_eq!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), Some(&event.pubkey));

        let event1 = auth_event(&challenge);
        let result1 = client_conn.authenticate(&event1, RELAY.into());

        assert!(matches!(result1, Ok(())));
        assert_eq!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), Some(&event.pubkey));
        assert_ne!(client_conn.auth_pubkey(), Some(&event1.pubkey));
    }

    #[test]
    fn test_fail_to_authenticate_with_invalid_event() {
        let mut client_conn = ClientConn::new("127.0.0.1".into());

        assert_eq!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        client_conn.generate_auth_challenge();

        assert_ne!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        let challenge = client_conn.auth_challenge().unwrap();
        let mut event = auth_event(challenge);
        event.sig = event.sig.chars().rev().collect::<String>();

        let result = client_conn.authenticate(&event, RELAY.into());

        assert!(matches!(result, Err(Error::AuthFailure)));
    }
    #[test]
    fn test_fail_to_authenticate_with_invalid_event_kind() {
        let mut client_conn = ClientConn::new("127.0.0.1".into());

        assert_eq!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        client_conn.generate_auth_challenge();

        assert_ne!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        let challenge = client_conn.auth_challenge().unwrap();
        let event = auth_event_with_kind(challenge, 9999999999999999);

        let result = client_conn.authenticate(&event, RELAY.into());

        assert!(matches!(result, Err(Error::AuthFailure)));
    }

    #[test]
    fn test_fail_to_authenticate_with_expired_timestamp() {
        let mut client_conn = ClientConn::new("127.0.0.1".into());

        assert_eq!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        client_conn.generate_auth_challenge();

        assert_ne!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        let challenge = client_conn.auth_challenge().unwrap();
        let event = auth_event_with_created_at(challenge, unix_time() - 1200); // 20 minutes

        let result = client_conn.authenticate(&event, RELAY.into());

        assert!(matches!(result, Err(Error::AuthFailure)));
    }
    #[test]
    fn test_fail_to_authenticate_with_future_timestamp() {
        let mut client_conn = ClientConn::new("127.0.0.1".into());

        assert_eq!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        client_conn.generate_auth_challenge();

        assert_ne!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        let challenge = client_conn.auth_challenge().unwrap();
        let event = auth_event_with_created_at(challenge, unix_time() + 1200); // 20 minutes

        let result = client_conn.authenticate(&event, RELAY.into());

        assert!(matches!(result, Err(Error::AuthFailure)));
    }

    #[test]
    fn test_fail_to_authenticate_without_tags() {
        let mut client_conn = ClientConn::new("127.0.0.1".into());

        assert_eq!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        client_conn.generate_auth_challenge();

        assert_ne!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        let event = auth_event_without_tags();

        let result = client_conn.authenticate(&event, RELAY.into());

        assert!(matches!(result, Err(Error::AuthFailure)));
    }

    #[test]
    fn test_fail_to_authenticate_without_challenge() {
        let mut client_conn = ClientConn::new("127.0.0.1".into());

        assert_eq!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        client_conn.generate_auth_challenge();

        assert_ne!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        let event = auth_event_without_challenge();

        let result = client_conn.authenticate(&event, RELAY.into());

        assert!(matches!(result, Err(Error::AuthFailure)));
    }
    #[test]
    fn test_fail_to_authenticate_without_relay() {
        let mut client_conn = ClientConn::new("127.0.0.1".into());

        assert_eq!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        client_conn.generate_auth_challenge();

        assert_ne!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        let challenge = client_conn.auth_challenge().unwrap();
        let event = auth_event_without_relay(challenge);

        let result = client_conn.authenticate(&event, RELAY.into());

        assert!(matches!(result, Err(Error::AuthFailure)));
    }

    #[test]
    fn test_fail_to_authenticate_with_invalid_challenge() {
        let mut client_conn = ClientConn::new("127.0.0.1".into());

        assert_eq!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        client_conn.generate_auth_challenge();

        assert_ne!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        let event = auth_event(&"invalid challenge".into());

        let result = client_conn.authenticate(&event, RELAY.into());

        assert!(matches!(result, Err(Error::AuthFailure)));
    }

    #[test]
    fn test_fail_to_authenticate_with_invalid_relay() {
        let mut client_conn = ClientConn::new("127.0.0.1".into());

        assert_eq!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        client_conn.generate_auth_challenge();

        assert_ne!(client_conn.auth_challenge(), None);
        assert_eq!(client_conn.auth_pubkey(), None);

        let challenge = client_conn.auth_challenge().unwrap();
        let event = auth_event_with_relay(challenge, &"xyz".into());

        let result = client_conn.authenticate(&event, RELAY.into());

        assert!(matches!(result, Err(Error::AuthFailure)));
    }
    fn auth_event(challenge: &String) -> Event {
        create_auth_event(Some(challenge), Some(&RELAY.into()), 22242, unix_time())
    }

    fn auth_event_with_kind(challenge: &String, kind: u64) -> Event {
        create_auth_event(Some(challenge), Some(&RELAY.into()), kind, unix_time())
    }

    fn auth_event_with_created_at(challenge: &String, created_at: u64) -> Event {
        create_auth_event(Some(challenge), Some(&RELAY.into()), 22242, created_at)
    }

    fn auth_event_without_challenge() -> Event {
        create_auth_event(None, Some(&RELAY.into()), 22242, unix_time())
    }

    fn auth_event_without_relay(challenge: &String) -> Event {
        create_auth_event(Some(challenge), None, 22242, unix_time())
    }

    fn auth_event_without_tags() -> Event {
        create_auth_event(None, None, 22242, unix_time())
    }

    fn auth_event_with_relay(challenge: &String, relay: &String) -> Event {
        create_auth_event(Some(challenge), Some(relay), 22242, unix_time())
    }
    fn create_auth_event(
        challenge: Option<&String>,
        relay: Option<&String>,
        kind: u64,
        created_at: u64,
    ) -> Event {
        let secp = Secp256k1::new();
        let key_pair = KeyPair::new(&secp, &mut rand::thread_rng());
        let public_key = XOnlyPublicKey::from_keypair(&key_pair);

        let mut tags: Vec<Vec<String>> = vec![];

        if let Some(c) = challenge {
            let tag = vec!["challenge".into(), c.into()];
            tags.push(tag);
        }

        if let Some(r) = relay {
            let tag = vec!["relay".into(), r.into()];
            tags.push(tag);
        }

        let mut event = Event {
            id: "0".to_owned(),
            pubkey: public_key.to_hex(),
            delegated_by: None,
            created_at,
            kind,
            tags,
            content: "".to_owned(),
            sig: "0".to_owned(),
            tagidx: None,
        };

        let c = event.to_canonical().unwrap();
        let digest: sha256::Hash = sha256::Hash::hash(c.as_bytes());

        let msg = secp256k1::Message::from_slice(digest.as_ref()).unwrap();
        let sig = secp.sign_schnorr(&msg, &key_pair);

        event.id = format!("{digest:x}");
        event.sig = sig.to_hex();

        event
    }
}
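For context on the `to_canonical` call above: per NIP-01 an event's id is the sha256 of the JSON array [0, pubkey, created_at, kind, tags, content]. A hedged standalone sketch follows; serde_json and sha2 are illustrative crate choices here, not necessarily what the relay itself uses.

use serde_json::json;
use sha2::{Digest, Sha256};

fn event_id(pubkey: &str, created_at: u64, kind: u64, tags: &[Vec<String>], content: &str) -> String {
    // Canonical form per NIP-01, serialized compactly, then hex-encoded sha256.
    let canonical = json!([0, pubkey, created_at, kind, tags, content]).to_string();
    Sha256::digest(canonical.as_bytes())
        .iter()
        .map(|b| format!("{b:02x}"))
        .collect()
}

fn main() {
    let pubkey = "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798";
    // Assuming NIP-01's compact serialization, this should reproduce the id of
    // the pre-made event in the integration test below.
    println!("{}", event_id(pubkey, 1691239763, 1, &[], "hello world"));
}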
@@ -1,8 +1,10 @@
use anyhow::Result;
use futures::SinkExt;
use futures::StreamExt;
use std::thread;
use std::time::Duration;
use tokio_tungstenite::connect_async;
use tracing::info;
mod common;

#[tokio::test]
@@ -45,3 +47,33 @@ async fn relay_home_page() -> Result<()> {
    let _res = relay.shutdown_tx.send(());
    Ok(())
}

//#[tokio::test]
// Still in progress
async fn publish_test() -> Result<()> {
    // get a relay and wait for startup
    let relay = common::start_relay()?;
    common::wait_for_healthy_relay(&relay).await?;
    // open a non-secure websocket connection.
    let (mut ws, _res) = connect_async(format!("ws://localhost:{}", relay.port)).await?;
    // send a simple pre-made message
    let simple_event = r#"["EVENT", {"content": "hello world","created_at": 1691239763,
        "id":"f3ce6798d70e358213ebbeba4886bbdfacf1ecfd4f65ee5323ef5f404de32b86",
        "kind": 1,
        "pubkey": "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798",
        "sig": "30ca29e8581eeee75bf838171dec818af5e6de2b74f5337de940f5cc91186534c0b20d6cf7ad1043a2c51dbd60b979447720a471d346322103c83f6cb66e4e98",
        "tags": []}]"#;
    ws.send(simple_event.into()).await?;
    // get response from server, confirm it is an array with first element "OK"
    let event_confirm = ws.next().await;
    ws.close(None).await?;
    info!("event confirmed: {:?}", event_confirm);
    // open a new connection, and wait for some time to get the event.
    let (mut sub_ws, _res) = connect_async(format!("ws://localhost:{}", relay.port)).await?;
    let event_sub = r#"["REQ", "simple", {}]"#;
    sub_ws.send(event_sub.into()).await?;
    // read from subscription
    let _ws_next = sub_ws.next().await;
    let _res = relay.shutdown_tx.send(());
    Ok(())
}
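A hedged sketch of the acknowledgement check the comment above describes: per NIP-20 the relay answers ["OK", <event id>, <accepted>, <message>]. This helper is illustrative, not code from the repo:

use serde_json::Value;

fn is_ok_ack(msg: &str, event_id: &str) -> bool {
    // Accept only a JSON array of the form ["OK", <event id>, true, ...].
    match serde_json::from_str::<Value>(msg) {
        Ok(Value::Array(a)) => {
            a.first().and_then(Value::as_str) == Some("OK")
                && a.get(1).and_then(Value::as_str) == Some(event_id)
                && a.get(2).and_then(Value::as_bool) == Some(true)
        }
        _ => false,
    }
}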