Compare commits

...

33 Commits

Author SHA1 Message Date
Greg Heartsfield
21d1bbcfe3 build: bump version to 0.8.10 2023-08-05 11:18:12 -05:00
Greg Heartsfield
c3e13af9e3 test: wip integration test for event publishing 2023-08-05 11:16:11 -05:00
Greg Heartsfield
05f70112e8 improvement: reduce logging for hex parse failures in events 2023-08-05 07:13:53 -05:00
Greg Heartsfield
eab522dc39 feat: warn or exit on config file parse errors
The relay will now fail to start if an invalid config file is
explicitly provided.  If the file was read implicitly from the current
directory, a warning will be printed, but the relay will still start up.
2023-07-29 08:33:27 -05:00
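
A condensed sketch of that behavior, mirroring the `Settings::new` change in the `config.rs` diff further down this page:

```rust
// Condensed from the config.rs diff below: an explicitly-named config
// file that fails to parse is a hard error; an implicit one falls back
// to defaults with a warning.
pub fn new(config_file_name: &Option<String>) -> Result<Self, ConfigError> {
    let default_settings = Self::default();
    match Self::new_from_default(&default_settings, config_file_name) {
        // explicit file: propagate the parse error and let main() exit
        Err(e) if config_file_name.is_some() => Err(e),
        Err(e) => {
            eprintln!("Error reading config file ({:?})", e);
            eprintln!("WARNING: Default configuration settings will be used");
            Ok(default_settings)
        }
        ok => ok,
    }
}
```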
Iru Sensei
edf7af1573 feat: verify config file exists and can be read 2023-07-29 08:32:55 -05:00
Václav Navrátil
34f497a650 docs: example SQL to delete old events
Added SQL Query example to delete events older than 30 days.
2023-07-29 06:45:17 -05:00
Greg Heartsfield
4adad4c3a9 fix: update since/until semantics for subscriptions 2023-07-16 11:42:55 -05:00
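
Condensed from the `ReqFilter` diff at the bottom of this page, the semantic change is that `since` and `until` are now inclusive bounds:

```rust
// A subscription now matches events at the boundary timestamps.
self.since.map_or(true, |t| event.created_at >= t)      // was: >
    && self.until.map_or(true, |t| event.created_at <= t)  // was: <
```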
Václav Navrátil
70dfcb6a04 feat(NIP-11): relay_icon option added 2023-07-16 11:42:41 -05:00
jiftechnify
c50e10aa21 fix: keep up with the latest specs for since/until filter 2023-07-15 11:12:38 -05:00
Greg Heartsfield
9e22776227 refactor: whitespace 2023-07-03 10:35:51 -05:00
Greg Heartsfield
dad6911807 refactor: clippy suggestions 2023-07-03 10:31:22 -05:00
thesimplekid
ddc58a2f1c feat: config sending dms on pay to relay signup 2023-07-03 09:51:28 -05:00
thesimplekid
1131c1986e fix: lnbits expired invoice for existing user 2023-07-03 09:51:07 -05:00
thesimplekid
06fcaad9a1 chore: typos 2023-07-03 09:49:40 -05:00
Greg Heartsfield
087b68128f fix: ensure startup SQL runs, even with zero min writers 2023-06-23 10:38:06 -05:00
Greg Heartsfield
4647476622 improvement: default to logging on stdout 2023-06-23 10:34:25 -05:00
Greg Heartsfield
7a72e588ea refactor: reorder imports 2023-06-23 10:03:08 -05:00
Jamin M
9237eed735 feat: roll over logs daily 2023-06-23 10:03:01 -05:00
Jamin M
f4beb884b3 feat: allow logging output to file 2023-06-23 10:02:49 -05:00
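
A condensed sketch of the wiring these two logging commits add (see the `main.rs` diff near the end of this page):

```rust
// Daily-rolling, non-blocking file logging via tracing-appender.
let file_appender = tracing_appender::rolling::daily(path, prefix);
let (non_blocking, guard) = tracing_appender::non_blocking(file_appender);
// The guard must live for the whole program, or buffered lines are
// dropped; the diff stores it in `_log_guard`.
_log_guard = Some(guard);
tracing_subscriber::fmt()
    .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
    .with_writer(non_blocking)
    .try_init()
    .unwrap();
```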
Yuval Adam
73285683a3 docs: add database maintenance example queries 2023-06-23 09:55:05 -05:00
rorp
2f10271903 improvement(NIP-42): use 'restricted:' prefix for auth error msgs 2023-06-23 09:52:50 -05:00
thesimplekid
a34516628b docs: typo in build-essential package name 2023-06-23 09:48:43 -05:00
Greg Heartsfield
eba7a32615 perf: reduce SQLite connection count and idle lifetime
On lightly loaded relays, we free up memory faster by letting idle
connections be reclaimed in 10 seconds instead of the default 10
minutes.  This also sets the minimum to zero connections, instead of
always trying to hold one open.
2023-05-07 19:38:18 -05:00
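
A condensed sketch of the r2d2 pool settings this commit introduces (see the `SqliteRepo` diff below):

```rust
// Zero minimum idle connections, reclaimed after 10 seconds of idleness.
let pool = r2d2::Pool::builder()
    .test_on_check_out(true)
    .min_idle(Some(0))                           // previously held 1 connection open
    .max_size(max_size)
    .idle_timeout(Some(Duration::from_secs(10))) // r2d2's default is 10 minutes
    .max_lifetime(Some(Duration::from_secs(30)))
    .build(manager)
    .unwrap();
```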
Greg Heartsfield
4d746fad85 docs: helpful ubuntu packages for building 2023-05-07 19:33:10 -05:00
Greg Heartsfield
0582a891cc perf: switch to jemalloc allocator 2023-05-07 19:32:50 -05:00
Greg Heartsfield
2bcddf8bbf perf: disable sqlite mmap to reduce memory pressure 2023-05-06 15:40:56 -05:00
Greg Heartsfield
1595ec783d docs: allow host header prefix matching, required for Damus compatibility 2023-05-06 14:43:30 -05:00
Greg Heartsfield
a2d1d78e23 docs: reformatting 2023-05-06 14:42:59 -05:00
Greg Heartsfield
04db2203bb perf: use standard allocator, limit sqlite mmap to 4GB
This is an experimental change to see if we can reduce memory usage
with large SQLite databases.  If successful, we'll do this again and
further reduce the database mmap size.

This will cause greater use of the page cache, but that is more easily
reclaimed by the kernel, and should reduce memory pressure, as well as
making it clearer how much memory the application is actually using
for connections, subscriptions, etc.
2023-05-03 07:22:44 -05:00
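
The cap itself is a one-line pragma in the SQLite startup SQL (shown near the end of this diff); a minimal sketch of applying it with rusqlite:

```rust
use rusqlite::Connection;

// This commit caps mmap at 4GB (4294967296); commit 2bcddf8bbf above
// later disables mmap entirely, so the net diff below shows 16GB -> 0.
fn apply_startup_pragmas(conn: &Connection) -> rusqlite::Result<()> {
    conn.execute_batch("PRAGMA mmap_size = 4294967296; -- cap mmap at 4GB")
}
```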
Greg Heartsfield
1c1b1a1802 build: upgrade checkout action for github ci 2023-04-30 11:13:03 -05:00
Greg Heartsfield
993fec4eed improvement: document pg connection_write config 2023-04-30 10:10:06 -05:00
Kieran
beffeb4d86 improvement: add a configurable postgres write conn string
This adds a new configurable connection string for postgres writes.
2023-04-30 10:02:10 -05:00
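
A condensed sketch of how the write pool is selected, mirroring `build_postgres_pool` in the diff below:

```rust
// Writes go to a dedicated pool when `database.connection_write` is
// set; otherwise the read pool is reused for writes.
let write_pool: PostgresPool = match &settings.database.connection_write {
    Some(cfg_write) => PoolOptions::new()
        .max_connections(settings.database.max_conn)
        .min_connections(settings.database.min_conn)
        .connect(cfg_write.as_str())
        .await
        .unwrap(),
    None => pool.clone(),
};
let repo = PostgresRepo::new(pool, write_pool, metrics);
```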
Petr Kracik
5135f3b007 improvement: use appropriate paths for systemd example 2023-04-30 09:55:07 -05:00
34 changed files with 399 additions and 201 deletions


@@ -9,7 +9,7 @@ jobs:
test_nostr-rs-relay:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Update local toolchain
run: |

.gitignore (vendored), 2 lines changed

@@ -1,4 +1,4 @@
**/target/
nostr.db
nostr.db-*
justfile
justfile

Cargo.lock (generated), 118 lines changed

@@ -54,15 +54,6 @@ dependencies = [
"libc",
]
[[package]]
name = "ansi_term"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
dependencies = [
"winapi",
]
[[package]]
name = "anyhow"
version = "1.0.69"
@@ -420,7 +411,7 @@ dependencies = [
"js-sys",
"num-integer",
"num-traits",
"time",
"time 0.1.45",
"wasm-bindgen",
"winapi",
]
@@ -565,7 +556,7 @@ dependencies = [
"tonic",
"tracing",
"tracing-core",
"tracing-subscriber 0.3.16",
"tracing-subscriber",
]
[[package]]
@@ -1511,15 +1502,6 @@ dependencies = [
"libc",
]
[[package]]
name = "matchers"
version = "0.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1"
dependencies = [
"regex-automata",
]
[[package]]
name = "matchers"
version = "0.1.0"
@@ -1695,12 +1677,23 @@ dependencies = [
"tonic",
"tonic-build",
"tracing",
"tracing-subscriber 0.2.25",
"tracing-appender",
"tracing-subscriber",
"tungstenite",
"url",
"uuid",
]
[[package]]
name = "nu-ansi-term"
version = "0.46.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
dependencies = [
"overload",
"winapi",
]
[[package]]
name = "num"
version = "0.2.1"
@@ -1861,6 +1854,12 @@ version = "6.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee"
[[package]]
name = "overload"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
[[package]]
name = "parking"
version = "2.0.0"
@@ -2990,6 +2989,33 @@ dependencies = [
"winapi",
]
[[package]]
name = "time"
version = "0.3.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890"
dependencies = [
"itoa",
"serde",
"time-core",
"time-macros",
]
[[package]]
name = "time-core"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd"
[[package]]
name = "time-macros"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36"
dependencies = [
"time-core",
]
[[package]]
name = "tinyvec"
version = "1.6.0"
@@ -3223,6 +3249,17 @@ dependencies = [
"tracing-core",
]
[[package]]
name = "tracing-appender"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e"
dependencies = [
"crossbeam-channel",
"time 0.3.20",
"tracing-subscriber",
]
[[package]]
name = "tracing-attributes"
version = "0.1.23"
@@ -3265,51 +3302,22 @@ dependencies = [
"tracing-core",
]
[[package]]
name = "tracing-serde"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1"
dependencies = [
"serde",
"tracing-core",
]
[[package]]
name = "tracing-subscriber"
version = "0.2.25"
version = "0.3.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71"
checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70"
dependencies = [
"ansi_term",
"chrono",
"lazy_static",
"matchers 0.0.1",
"matchers",
"nu-ansi-term",
"once_cell",
"regex",
"serde",
"serde_json",
"sharded-slab",
"smallvec",
"thread_local",
"tracing",
"tracing-core",
"tracing-log",
"tracing-serde",
]
[[package]]
name = "tracing-subscriber"
version = "0.3.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70"
dependencies = [
"matchers 0.1.0",
"once_cell",
"regex",
"sharded-slab",
"thread_local",
"tracing",
"tracing-core",
]
[[package]]


@@ -1,6 +1,6 @@
[package]
name = "nostr-rs-relay"
version = "0.8.9"
version = "0.8.10"
edition = "2021"
authors = ["Greg Heartsfield <scsibug@imap.cc>"]
description = "A relay implementation for the Nostr protocol"
@@ -13,8 +13,9 @@ categories = ["network-programming", "web-programming"]
[dependencies]
clap = { version = "4.0.32", features = ["env", "default", "derive"]}
tracing = "0.1.36"
tracing-subscriber = "0.2.0"
tracing = "0.1.37"
tracing-appender = "0.2.2"
tracing-subscriber = "0.3.16"
tokio = { version = "1", features = ["full", "tracing", "signal"] }
prost = "0.11"
tonic = "0.8.3"


@@ -93,6 +93,11 @@ https://hub.docker.com/r/scsibug/nostr-rs-relay
Building `nostr-rs-relay` requires an installation of Cargo & Rust: https://www.rust-lang.org/tools/install
The following OS packages will be helpful; on Debian/Ubuntu:
```console
$ sudo apt-get install build-essential cmake protobuf-compiler pkg-config libssl-dev
```
Clone this repository, and then build a release version of the relay:
```console


@@ -20,6 +20,9 @@ description = "A newly created nostr-rs-relay.\n\nCustomize this with your own i
# ICO format.
#favicon = "favicon.ico"
# URL of Relay's icon.
#relay_icon = "https://example.test/img.png"
[diagnostics]
# Enable tokio tracing (for use with tokio-console)
#tracing = false
@@ -37,12 +40,12 @@ description = "A newly created nostr-rs-relay.\n\nCustomize this with your own i
# Use an in-memory database instead of 'nostr.db'.
# Requires sqlite engine.
# Caution; this will not survive a process restart!
#in_memory = false
in_memory = true
# Database connection pool settings for subscribers:
# Minimum number of SQLite reader connections
#min_conn = 4
#min_conn = 0
# Maximum number of SQLite reader connections. Recommend setting this
# to approx the number of cores.
@@ -52,6 +55,15 @@ description = "A newly created nostr-rs-relay.\n\nCustomize this with your own i
# sqlite.
#connection = "postgresql://postgres:nostr@localhost:7500/nostr"
# Optional database connection string for writing. Use this for
# postgres clusters where you want to separate reads and writes to
# different nodes. Ignore for single-database instances.
#connection_write = "postgresql://postgres:nostr@localhost:7500/nostr"
[logging]
# Directory to store log files. Log files roll over daily.
#folder_path = "./log"
#file_prefix = "nostr-relay"
[grpc]
# gRPC interfaces for externalized decisions and other extensions to
@@ -193,6 +205,9 @@ reject_future_seconds = 1800
# LNBits api secret
#api_secret = "<ln bits api>"
# Nostr direct message on signup
#direct_message=true
# Terms of service
#terms_message = """
#This service (and supporting services) are provided "as is", without warranty of any kind, express or implied.
@@ -214,4 +229,6 @@ reject_future_seconds = 1800
# Whether or not new sign ups should be allowed
#sign_ups = false
# optional if `direct_message=false`
#secret_key = "<nostr nsec>"


@@ -1,12 +1,14 @@
[Unit]
Description=nostr-rs-relay
[Service]
User=REPLACE_WITH_YOUR_USERNAME
WorkingDirectory=/usr/bin/
WorkingDirectory=/var/lib/nostr-rs-relay
Environment=RUST_LOG=warn,nostr_rs_relay=info
ExecStart=nostr-rs-relay --config /etc/nostr-rs-relay/config.toml
ExecStart=/usr/bin/nostr-rs-relay --config /etc/nostr-rs-relay/config.toml
TimeoutStopSec=10
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
WantedBy=multi-user.target


@@ -78,18 +78,24 @@ PRAGMA foreign_keys = ON;
delete from event where event_hash=x'00000000000c1271675dc86e3e1dd1336827bccabb90dc4c9d3b4465efefe00e';
```
### Deleting All Events for Pubkey
### Querying and Deleting All Events for Pubkey
```console
PRAGMA foreign_keys = ON;
select lower(hex(author)) as author, count(*) as c from event group by author order by c asc;
delete from event where author=x'000000000002c7831d9c5a99f183afc2813a6f69a16edda7f6fc0ed8110566e6';
```
### Deleting All Events of a Kind
### Querying and Deleting All Events of a Kind
```console
PRAGMA foreign_keys = ON;
select printf('%7d', kind), count(*) as c from event group by kind order by c;
delete from event where kind=70202;
```
@@ -106,7 +112,8 @@ seen" policy.
```console
PRAGMA foreign_keys = ON;
TODO!
DELETE FROM event WHERE first_seen < CAST(strftime('%s', date('now', '-30 day')) AS INT);
```
### Delete Profile Events with No Recent Events


@@ -10,7 +10,7 @@ and reduce spam and abuse.
This will likely evolve substantially; the first goal is to get a
basic one-way service that lets an externalized program decide on
event persistance. This does not represent the final state of gRPC
event persistence. This does not represent the final state of gRPC
extensibility in `nostr-rs-relay`.
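
A condensed sketch of the one-way call, based on the `EventAuthzService` changes later in this diff (the request fields are elided and the reply type name here is illustrative):

```rust
// Forward an event to the external authorization server and return its
// decision; `conn` is a lazily-established tonic client.
pub async fn admit_event(&mut self, event: &Event, ip: &str) -> Result<Box<EventReply>> {
    self.ready_connection().await;
    if let Some(svr) = &mut self.conn {
        let svr_res = svr.admit_event(/* EventRequest { event, ip, origin, ... } */).await?;
        Ok(Box::new(svr_res.into_inner()))
    } else {
        Err(Error::AuthzError)
    }
}
```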
## Considerations


@@ -22,18 +22,18 @@ api_secret = "<LNbits api key>"
# Terms of service
terms_message = """This service ....
"""
# Whether or not new sign ups should be allowed
sign_ups = true
# Whether or not new sign ups should be allowed
sign_ups = true
secret_key = "<nostr secret key to send dms>"
```
The LNBits instance must have a signed HTTPS certificate; a self-signed certificate will not work.
The LNBits instance must have a signed HTTPS certificate; a self-signed certificate will not work.
## Design Overview
### Concepts
All authors are initially not admitted to write to the relay. There are two ways to gain write access to the relay. The first is by attempting to post to the relay; upon receiving an event from an author that is not admitted, the relay will send a direct message including the terms of service of the relay and a lightning invoice for the admission cost. Once this invoice is payed the author can write to the relay. For this method to work the author must be reading from the relay. An author can also pay and accept the terms of service via a webpage `https://<relay-url>/join`.
All authors are initially not admitted to write to the relay. There are two ways to gain write access to the relay. The first is by attempting to post to the relay; upon receiving an event from an author that is not admitted, the relay will send a direct message including the terms of service of the relay and a lightning invoice for the admission cost. Once this invoice is paid the author can write to the relay. For this method to work the author must be reading from the relay. An author can also pay and accept the terms of service via a webpage `https://<relay-url>/join`.
## Design Details
@@ -54,7 +54,7 @@ Invoice information is stored in a dedicated table. This tracks:
* `created_at` timestamp of creation
* `confirmed_at` timestamp of payment
### Event Handling
### Event Handling
If "pay to relay" is enabled, all incoming events are evaluated to determine whether the author is on the relay's whitelist or if they have paid the admission fee and accepted the terms. If "pay per note" is enabled, there is an additional check to ensure that the author has enough balance, which is then reduced by the cost per note. If the author is on the whitelist, this balance check is not necessary.
@@ -77,9 +77,8 @@ simply to demonstrate a mitigation is possible.
*Mitigation*: Rate limit number of new sign ups
### Admitted Author Spamming
### Admitted Author Spamming
*Threat*: An attacker gains write access by paying the admission fee, and then floods the relay with a large number of spam events.
*Mitigation*: The attacker's admission can be revoked and their admission fee will not be refunded. Enabling "cost per event" and increasing the admission cost can also discourage this type of behavior.


@@ -29,7 +29,7 @@ frontend fe_prod
bind :80
http-request set-header X-Forwarded-Proto https if { ssl_fc }
redirect scheme https code 301 if !{ ssl_fc }
acl host_relay hdr(host) -i relay.example.com
acl host_relay hdr(host) -i -m beg relay.example.com
use_backend relay if host_relay
# HSTS (1 year)
http-response set-header Strict-Transport-Security max-age=31536000
@@ -117,10 +117,10 @@ Assumptions:
* `Traefik` version is `2.9` (other versions not tested).
* `Traefik` is used for provisioning of Let's Encrypt certificates.
* `Traefik` is running in `Docker`, using `docker compose` and labels for the static configuration. An equivalent setup useing a Traefik config file is possible too (but not covered here).
* `Traefik` is running in `Docker`, using `docker compose` and labels for the static configuration. An equivalent setup using a Traefik config file is possible too (but not covered here).
* Strict Transport Security is enabled.
* Hostname for the relay is `relay.example.com`, email adres for ACME certificates provider is `name@example.com`.
* ipv6 is enabled, a viable private ipv6 subnet is specified in the example below.
* Hostname for the relay is `relay.example.com`, email address for ACME certificates provider is `name@example.com`.
* ipv6 is enabled, a viable private ipv6 subnet is specified in the example below.
* Relay is running on port `8080`.
```
@@ -196,4 +196,4 @@ services:
### Traefik Notes
Traefik will take care of the provisioning and renewal of certificates. In case of an ipv4-only relay, simply delete the `enable_ipv6:` and `ipam:` entries in the `networks:` section of the docker-compose file.
Traefik will take care of the provisioning and renewal of certificates. In case of an ipv4-only relay, simply delete the `enable_ipv6:` and `ipam:` entries in the `networks:` section of the docker-compose file.


@@ -12,13 +12,13 @@ Start by building the application from source. Here is how to do that:
3. `cargo build --release`
### Place the files where they belong
We want to palce the nostr-rs-relay binary and the config.toml file where they belong. While still in the root level of the nostr-rs-relay folder you cloned in last step, run the following commands:
We want to place the nostr-rs-relay binary and the config.toml file where they belong. While still in the root level of the nostr-rs-relay folder you cloned in last step, run the following commands:
1. `sudo cp target/release/nostr-rs-relay /usr/local/bin/`
2. `sudo mkdir /etc/nostr-rs-relay`
3. `sudo cp config.toml /etc/nostr-rs-relay`
### Create the Systemd service file
We need to create a new Systemd service file. These files are placed in the `/etc/systemd/system/` folder where you will find many other services running.
We need to create a new Systemd service file. These files are placed in the `/etc/systemd/system/` folder where you will find many other services running.
1. `sudo vim /etc/systemd/system/nostr-rs-relay.service`
2. Paste in the contents of [this service file](../contrib/nostr-rs-relay.service). Remember to replace the `User` value with your own username.
@@ -36,4 +36,4 @@ To get the service running, we need to reload the systemd daemon and enable the
### Tips
#### Logs
The application will write logs to the journal. To read it, execute `sudo journalctl -f -u nostr-rs-relay`
The application will write logs to the journal. To read it, execute `sudo journalctl -f -u nostr-rs-relay`


@@ -179,7 +179,7 @@ attempts to persist them to disk. Once validated and persisted, these
events are broadcast to all subscribers.
When verification is enabled, the writer must check to ensure a valid,
unexpired verification record exists for the auther. All metadata
unexpired verification record exists for the author. All metadata
events (regardless of verification status) are forwarded to a verifier
module. If the verifier determines a new verification record is
needed, it is also responsible for persisting and broadcasting the


@@ -20,7 +20,7 @@ pub fn main() -> Result<()> {
let _trace_sub = tracing_subscriber::fmt::try_init();
println!("Nostr-rs-relay Bulk Loader");
// check for a database file, or create one.
let settings = config::Settings::new(&None);
let settings = config::Settings::new(&None)?;
if !Path::new(&settings.database.data_directory).is_dir() {
info!("Database directory does not exist");
return Err(Error::DatabaseDirError);


@@ -1,10 +1,8 @@
//! Configuration file and settings management
use crate::payment::Processor;
use config::{Config, ConfigError, File};
use serde::{Deserialize, Serialize};
use std::time::Duration;
use tracing::warn;
use crate::payment::Processor;
#[derive(Debug, Serialize, Deserialize, Clone)]
#[allow(unused)]
@@ -15,6 +13,7 @@ pub struct Info {
pub pubkey: Option<String>,
pub contact: Option<String>,
pub favicon: Option<String>,
pub relay_icon: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -26,6 +25,7 @@ pub struct Database {
pub min_conn: u32,
pub max_conn: u32,
pub connection: String,
pub connection_write: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -80,7 +80,7 @@ pub struct Limits {
pub struct Authorization {
pub pubkey_whitelist: Option<Vec<String>>, // If present, only allow these pubkeys to publish events
pub nip42_auth: bool, // if true enables NIP-42 authentication
pub nip42_dms: bool, // if true send DMs only to their authenticated recipients
pub nip42_dms: bool, // if true send DMs only to their authenticated recipients
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -92,8 +92,9 @@ pub struct PayToRelay {
pub node_url: String,
pub api_secret: String,
pub terms_message: String,
pub sign_ups: bool, // allow new users to sign up to relay
pub secret_key: String,
pub sign_ups: bool, // allow new users to sign up to relay
pub direct_message: bool, // Send direct message to user with invoice and terms
pub secret_key: Option<String>,
pub processor: Processor,
}
@@ -165,6 +166,13 @@ impl VerifiedUsers {
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Logging {
pub folder_path: Option<String>,
pub file_prefix: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(unused)]
pub struct Settings {
@@ -179,20 +187,27 @@ pub struct Settings {
pub verified_users: VerifiedUsers,
pub retention: Retention,
pub options: Options,
pub logging: Logging,
}
impl Settings {
#[must_use]
pub fn new(config_file_name: &Option<String>) -> Self {
pub fn new(config_file_name: &Option<String>) -> Result<Self, ConfigError> {
let default_settings = Self::default();
// attempt to construct settings with file
let from_file = Self::new_from_default(&default_settings, config_file_name);
match from_file {
Ok(f) => f,
Err(e) => {
warn!("Error reading config file ({:?})", e);
default_settings
// pass up the parse error if the config file was specified,
// otherwise use the default config (with a warning).
if config_file_name.is_some() {
Err(e)
} else {
eprintln!("Error reading config file ({:?})", e);
eprintln!("WARNING: Default configuration settings will be used");
Ok(default_settings)
}
}
ok => ok,
}
}
@@ -234,7 +249,14 @@ impl Settings {
// Should check that url is valid
assert_ne!(settings.pay_to_relay.node_url, "");
assert_ne!(settings.pay_to_relay.terms_message, "");
assert_ne!(settings.pay_to_relay.secret_key, "");
if settings.pay_to_relay.direct_message {
assert_ne!(
settings.pay_to_relay.secret_key,
Some("<nostr nsec>".to_string())
);
assert!(settings.pay_to_relay.secret_key.is_some());
}
}
Ok(settings)
@@ -251,6 +273,7 @@ impl Default for Settings {
pubkey: None,
contact: None,
favicon: None,
relay_icon: None,
},
diagnostics: Diagnostics { tracing: false },
database: Database {
@@ -260,6 +283,7 @@ impl Default for Settings {
min_conn: 4,
max_conn: 8,
connection: "".to_owned(),
connection_write: None,
},
grpc: Grpc {
event_admission_server: None,
@@ -296,7 +320,8 @@ impl Default for Settings {
node_url: "".to_string(),
api_secret: "".to_string(),
sign_ups: false,
secret_key: "".to_string(),
direct_message: true,
secret_key: None,
processor: Processor::LNBits,
},
verified_users: VerifiedUsers {
@@ -318,6 +343,10 @@ impl Default for Settings {
options: Options {
reject_future_seconds: None, // Reject events in the future if defined
},
logging: Logging {
folder_path: None,
file_prefix: None,
},
}
}
}


@@ -204,7 +204,7 @@ impl ClientConn {
}
}
match (relay.and_then(|url| host_str(url)), host_str(relay_url)) {
match (relay.and_then(host_str), host_str(relay_url)) {
(Some(received_relay), Some(our_relay)) => {
if received_relay != our_relay {
return Err(Error::AuthFailure);


@@ -70,7 +70,26 @@ async fn build_postgres_pool(settings: &Settings, metrics: NostrMetrics) -> Post
.connect_with(options)
.await
.unwrap();
let repo = PostgresRepo::new(pool, metrics);
let write_pool: PostgresPool = match &settings.database.connection_write {
Some(cfg_write) => {
let mut options_write: PgConnectOptions = cfg_write.as_str().parse().unwrap();
options_write.log_statements(LevelFilter::Debug);
options_write.log_slow_statements(LevelFilter::Warn, Duration::from_secs(60));
PoolOptions::new()
.max_connections(settings.database.max_conn)
.min_connections(settings.database.min_conn)
.idle_timeout(Duration::from_secs(60))
.connect_with(options_write)
.await
.unwrap()
}
None => pool.clone(),
};
let repo = PostgresRepo::new(pool, write_pool, metrics);
// Panic on migration failure
let version = repo.migrate_up().await.unwrap();
info!("Postgres migration completed, at v{}", version);
@@ -359,7 +378,7 @@ pub async fn db_writer(
notice_tx
.try_send(Notice::blocked(
event.id,
&decision.message().unwrap_or_else(|| "".to_string()),
&decision.message().unwrap_or_default(),
))
.ok();
continue;


@@ -472,12 +472,11 @@ mod tests {
let mut event = Event::simple_event();
event.tags = vec![vec!["e".to_owned(), "foo".to_owned()]];
event.build_index();
assert_eq!(
assert!(
event.generic_tag_val_intersect(
'e',
&HashSet::from(["foo".to_owned(), "bar".to_owned()])
),
true
)
);
}


@@ -45,6 +45,8 @@ pub struct RelayInfo {
#[serde(skip_serializing_if = "Option::is_none")]
pub contact: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub icon: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub supported_nips: Option<Vec<i64>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub software: Option<String>,
@@ -124,6 +126,7 @@ impl From<Settings> for RelayInfo {
limitation: Some(limitations),
payment_url,
fees,
icon: i.relay_icon,
}
}
}


@@ -14,6 +14,6 @@ pub mod notice;
pub mod repo;
pub mod subscription;
pub mod utils;
// Public API for creating relays programatically
// Public API for creating relays programmatically
pub mod payment;
pub mod server;


@@ -4,13 +4,17 @@ use console_subscriber::ConsoleLayer;
use nostr_rs_relay::cli::CLIArgs;
use nostr_rs_relay::config;
use nostr_rs_relay::server::start_server;
use std::fs;
use std::path::Path;
use std::process;
use std::sync::mpsc as syncmpsc;
use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
use std::thread;
use tracing::info;
#[cfg(not(target_env = "msvc"))]
use tikv_jemallocator::Jemalloc;
use tracing::info;
use tracing_appender::non_blocking::WorkerGuard;
use tracing_subscriber::EnvFilter;
#[cfg(not(target_env = "msvc"))]
#[global_allocator]
@@ -23,9 +27,35 @@ fn main() {
// get config file name from args
let config_file_arg = args.config;
// Ensure the config file is readable if it was explicitly set
if let Some(config_path) = config_file_arg.as_ref() {
let path = Path::new(&config_path);
if !path.exists() {
eprintln!("Config file not found: {}", &config_path);
process::exit(1);
}
if !path.is_file() {
eprintln!("Invalid config file path: {}", &config_path);
process::exit(1);
}
if let Err(err) = fs::metadata(path) {
eprintln!("Error while accessing file metadata: {}", err);
process::exit(1);
}
if let Err(err) = fs::File::open(path) {
eprintln!("Config file is not readable: {}", err);
process::exit(1);
}
}
let mut _log_guard: Option<WorkerGuard> = None;
// configure settings from the config file (defaults to config.toml)
// replace default settings with those read from the config file
let mut settings = config::Settings::new(&config_file_arg);
let mut settings = config::Settings::new(&config_file_arg).unwrap_or_else(|e| {
eprintln!("Error reading config file ({:?})", e);
process::exit(1);
});
// setup tracing
if settings.diagnostics.tracing {
@@ -33,7 +63,27 @@ fn main() {
ConsoleLayer::builder().with_default_env().init();
} else {
// standard logging
tracing_subscriber::fmt::try_init().unwrap();
if let Some(path) = &settings.logging.folder_path {
// write logs to a folder
let prefix = match &settings.logging.file_prefix {
Some(p) => p.as_str(),
None => "relay",
};
let file_appender = tracing_appender::rolling::daily(path, prefix);
let (non_blocking, guard) = tracing_appender::non_blocking(file_appender);
let filter = EnvFilter::from_default_env();
// assign to a variable that is not dropped till the program ends
_log_guard = Some(guard);
tracing_subscriber::fmt()
.with_env_filter(filter)
.with_writer(non_blocking)
.try_init()
.unwrap();
} else {
// write to stdout
tracing_subscriber::fmt::try_init().unwrap();
}
}
info!("Starting up from main");


@@ -35,7 +35,7 @@ impl std::convert::From<Nip05Name> for nauthz_grpc::event_request::Nip05Name {
fn from(value: Nip05Name) -> Self {
nauthz_grpc::event_request::Nip05Name {
local: value.local.clone(),
domain: value.domain.clone(),
domain: value.domain,
}
}
}
@@ -57,7 +57,7 @@ impl EventAuthzService {
eas
}
pub async fn ready_connection(self: &mut Self) {
pub async fn ready_connection(&mut self) {
if self.conn.is_none() {
let client = AuthorizationClient::connect(self.server_addr.to_string()).await;
if let Err(ref msg) = client {
@@ -70,7 +70,7 @@ impl EventAuthzService {
}
pub async fn admit_event(
self: &mut Self,
&mut self,
event: &Event,
ip: &str,
origin: Option<String>,
@@ -99,13 +99,13 @@ impl EventAuthzService {
origin,
user_agent,
auth_pubkey,
nip05: nip05.map(|x| nauthz_grpc::event_request::Nip05Name::from(x)),
nip05: nip05.map(nauthz_grpc::event_request::Nip05Name::from),
})
.await?;
let reply = svr_res.into_inner();
return Ok(Box::new(reply));
Ok(Box::new(reply))
} else {
return Err(Error::AuthzError);
Err(Error::AuthzError)
}
}
}


@@ -5,6 +5,7 @@ pub enum EventResultStatus {
Blocked,
RateLimited,
Error,
Restricted,
}
pub struct EventResult {
@@ -24,7 +25,7 @@ impl EventResultStatus {
pub fn to_bool(&self) -> bool {
match self {
Self::Duplicate | Self::Saved => true,
Self::Invalid | Self::Blocked | Self::RateLimited | Self::Error => false,
Self::Invalid | Self::Blocked | Self::RateLimited | Self::Error | Self::Restricted => false,
}
}
@@ -37,6 +38,7 @@ impl EventResultStatus {
Self::Blocked => "blocked",
Self::RateLimited => "rate-limited",
Self::Error => "error",
Self::Restricted => "restricted",
}
}
}
@@ -81,6 +83,11 @@ impl Notice {
Notice::prefixed(id, msg, EventResultStatus::Error)
}
#[must_use]
pub fn restricted(id: String, msg: &str) -> Notice {
Notice::prefixed(id, msg, EventResultStatus::Restricted)
}
#[must_use]
pub fn saved(id: String) -> Notice {
Notice::EventResult(EventResult {


@@ -5,10 +5,10 @@ use hyper::Client;
use hyper_tls::HttpsConnector;
use nostr::Keys;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use async_trait::async_trait;
use rand::Rng;
use tracing::debug;
use std::str::FromStr;
use url::Url;
@@ -110,8 +110,7 @@ impl PaymentProcessor for LNBitsPaymentProcessor {
expiry: 3600,
};
let url = Url::parse(&self.settings.pay_to_relay.node_url)?.join(APIPATH)?;
let uri = Uri::from_str(url.as_str().strip_suffix("/").unwrap_or(url.as_str())).unwrap();
debug!("{uri}");
let uri = Uri::from_str(url.as_str().strip_suffix('/').unwrap_or(url.as_str())).unwrap();
let req = hyper::Request::builder()
.method(hyper::Method::POST)
@@ -122,14 +121,10 @@ impl PaymentProcessor for LNBitsPaymentProcessor {
let res = self.client.request(req).await?;
debug!("{res:?}");
// Json to Struct of LNbits callback
let body = hyper::body::to_bytes(res.into_body()).await?;
let invoice_response: LNBitsCreateInvoiceResponse = serde_json::from_slice(&body)?;
debug!("{:?}", invoice_response);
Ok(InvoiceInfo {
pubkey: key.public_key().to_string(),
payment_hash: invoice_response.payment_hash,
@@ -147,7 +142,6 @@ impl PaymentProcessor for LNBitsPaymentProcessor {
.join(APIPATH)?
.join(payment_hash)?;
let uri = Uri::from_str(url.as_str()).unwrap();
debug!("{uri}");
let req = hyper::Request::builder()
.method(hyper::Method::GET)
@@ -159,13 +153,18 @@ impl PaymentProcessor for LNBitsPaymentProcessor {
let res = self.client.request(req).await?;
// Json to Struct of LNbits callback
let body = hyper::body::to_bytes(res.into_body()).await?;
debug!("check invoice: {body:?}");
let invoice_response: LNBitsCheckInvoiceResponse = serde_json::from_slice(&body)?;
let invoice_response: Value = serde_json::from_slice(&body)?;
let status = if invoice_response.paid {
InvoiceStatus::Paid
let status = if let Ok(invoice_response) =
serde_json::from_value::<LNBitsCheckInvoiceResponse>(invoice_response)
{
if invoice_response.paid {
InvoiceStatus::Paid
} else {
InvoiceStatus::Unpaid
}
} else {
InvoiceStatus::Unpaid
InvoiceStatus::Expired
};
Ok(status)


@@ -25,7 +25,7 @@ pub struct Payment {
/// Settings
settings: crate::config::Settings,
// Nostr Keys
nostr_keys: Keys,
nostr_keys: Option<Keys>,
/// Payment Processor
processor: Arc<dyn PaymentProcessor>,
}
@@ -102,7 +102,11 @@ impl Payment {
info!("Create payment handler");
// Create nostr key from sk string
let nostr_keys = Keys::from_sk_str(&settings.pay_to_relay.secret_key)?;
let nostr_keys = if let Some(secret_key) = &settings.pay_to_relay.secret_key {
Some(Keys::from_sk_str(secret_key)?)
} else {
None
};
// Create processor kind defined in settings
let processor = match &settings.pay_to_relay.processor {
@@ -130,7 +134,7 @@ impl Payment {
}
}
/// Internal select loop for preforming payment operatons
/// Internal select loop for preforming payment operations
async fn run_internal(&mut self) -> Result<()> {
tokio::select! {
m = self.payment_rx.recv() => {
@@ -148,7 +152,7 @@ impl Payment {
Ok(PaymentMessage::CheckAccount(pubkey)) => {
let keys = Keys::from_pk_str(&pubkey)?;
if let Some(invoice_info) = self.repo.get_unpaid_invoice(&keys).await? {
if let Ok(Some(invoice_info)) = self.repo.get_unpaid_invoice(&keys).await {
match self.check_invoice_status(&invoice_info.payment_hash).await? {
InvoiceStatus::Paid => {
self.repo.admit_account(&keys, self.settings.pay_to_relay.admission_cost).await?;
@@ -158,6 +162,10 @@ impl Payment {
self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok();
}
}
} else {
let amount = self.settings.pay_to_relay.admission_cost;
let invoice_info = self.get_invoice_info(&pubkey, amount).await?;
self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok();
}
}
Ok(PaymentMessage::InvoicePaid(payment_hash)) => {
@@ -189,6 +197,11 @@ impl Payment {
pubkey: &str,
invoice_info: &InvoiceInfo,
) -> Result<()> {
let nostr_keys = match &self.nostr_keys {
Some(key) => key,
None => return Err(Error::CustomError("Nostr key not defined".to_string())),
};
// Create Nostr key from pk
let key = Keys::from_pk_str(pubkey)?;
@@ -196,16 +209,16 @@ impl Payment {
// Event DM with terms of service
let message_event: NostrEvent = EventBuilder::new_encrypted_direct_msg(
&self.nostr_keys,
nostr_keys,
pubkey,
&self.settings.pay_to_relay.terms_message,
)?
.to_event(&self.nostr_keys)?;
.to_event(nostr_keys)?;
// Event DM with invoice
let invoice_event: NostrEvent =
EventBuilder::new_encrypted_direct_msg(&self.nostr_keys, pubkey, &invoice_info.bolt11)?
.to_event(&self.nostr_keys)?;
EventBuilder::new_encrypted_direct_msg(nostr_keys, pubkey, &invoice_info.bolt11)?
.to_event(nostr_keys)?;
// Persist DM events to DB
self.repo.write_event(&message_event.clone().into()).await?;
@@ -242,8 +255,10 @@ impl Payment {
.create_invoice_record(&key, invoice_info.clone())
.await?;
// Admission event invoice and terms to pubkey that is joining
self.send_admission_message(pubkey, &invoice_info).await?;
if self.settings.pay_to_relay.direct_message {
// Admission event invoice and terms to pubkey that is joining
self.send_admission_message(pubkey, &invoice_info).await?;
}
Ok(invoice_info)
}


@@ -28,13 +28,15 @@ pub type PostgresPool = sqlx::pool::Pool<Postgres>;
pub struct PostgresRepo {
conn: PostgresPool,
conn_write: PostgresPool,
metrics: NostrMetrics,
}
impl PostgresRepo {
pub fn new(c: PostgresPool, m: NostrMetrics) -> PostgresRepo {
pub fn new(c: PostgresPool, cw: PostgresPool, m: NostrMetrics) -> PostgresRepo {
PostgresRepo {
conn: c,
conn_write: cw,
metrics: m,
}
}
@@ -81,17 +83,17 @@ async fn delete_expired(conn: PostgresPool) -> Result<u64> {
impl NostrRepo for PostgresRepo {
async fn start(&self) -> Result<()> {
// begin a cleanup task for expired events.
cleanup_expired(self.conn.clone(), Duration::from_secs(600)).await?;
cleanup_expired(self.conn_write.clone(), Duration::from_secs(600)).await?;
Ok(())
}
async fn migrate_up(&self) -> Result<usize> {
Ok(run_migrations(&self.conn).await?)
Ok(run_migrations(&self.conn_write).await?)
}
async fn write_event(&self, e: &Event) -> Result<u64> {
// start transaction
let mut tx = self.conn.begin().await?;
let mut tx = self.conn_write.begin().await?;
let start = Instant::now();
// get relevant fields from event and convert to blobs.
@@ -455,7 +457,7 @@ ON CONFLICT (id) DO NOTHING"#,
}
async fn create_verification_record(&self, event_id: &str, name: &str) -> Result<()> {
let mut tx = self.conn.begin().await?;
let mut tx = self.conn_write.begin().await?;
sqlx::query("DELETE FROM user_verification WHERE \"name\" = $1")
.bind(name)
@@ -481,7 +483,7 @@ ON CONFLICT (id) DO NOTHING"#,
sqlx::query("UPDATE user_verification SET verified_at = $1, fail_count = 0 WHERE id = $2")
.bind(Utc.timestamp_opt(verify_time as i64, 0).unwrap())
.bind(id as i64)
.execute(&self.conn)
.execute(&self.conn_write)
.await?;
info!("verification updated for {}", id);
@@ -491,7 +493,7 @@ ON CONFLICT (id) DO NOTHING"#,
async fn fail_verification(&self, id: u64) -> Result<()> {
sqlx::query("UPDATE user_verification SET failed_at = now(), fail_count = fail_count + 1 WHERE id = $1")
.bind(id as i64)
.execute(&self.conn)
.execute(&self.conn_write)
.await?;
Ok(())
}
@@ -499,7 +501,7 @@ ON CONFLICT (id) DO NOTHING"#,
async fn delete_verification(&self, id: u64) -> Result<()> {
sqlx::query("DELETE FROM user_verification WHERE id = $1")
.bind(id as i64)
.execute(&self.conn)
.execute(&self.conn_write)
.await?;
Ok(())
}
@@ -551,7 +553,7 @@ ON CONFLICT (id) DO NOTHING"#,
async fn create_account(&self, pub_key: &Keys) -> Result<bool> {
let pub_key = pub_key.public_key().to_string();
let mut tx = self.conn.begin().await?;
let mut tx = self.conn_write.begin().await?;
let result = sqlx::query("INSERT INTO account (pubkey, balance) VALUES ($1, 0);")
.bind(pub_key)
@@ -577,7 +579,7 @@ ON CONFLICT (id) DO NOTHING"#,
)
.bind(admission_cost as i64)
.bind(pub_key)
.execute(&self.conn)
.execute(&self.conn_write)
.await?;
Ok(())
}
@@ -594,7 +596,7 @@ ON CONFLICT (id) DO NOTHING"#,
let result = sqlx::query_as::<_, (bool, i64)>(query)
.bind(pub_key)
.fetch_optional(&self.conn)
.fetch_optional(&self.conn_write)
.await?
.ok_or(error::Error::SqlxError(RowNotFound))?;
@@ -614,14 +616,14 @@ ON CONFLICT (id) DO NOTHING"#,
sqlx::query("UPDATE account SET balance = balance + $1 WHERE pubkey = $2")
.bind(new_balance as i64)
.bind(pub_key)
.execute(&self.conn)
.execute(&self.conn_write)
.await?
}
false => {
sqlx::query("UPDATE account SET balance = balance - $1 WHERE pubkey = $2")
.bind(new_balance as i64)
.bind(pub_key)
.execute(&self.conn)
.execute(&self.conn_write)
.await?
}
};
@@ -631,7 +633,7 @@ ON CONFLICT (id) DO NOTHING"#,
/// Create invoice record
async fn create_invoice_record(&self, pub_key: &Keys, invoice_info: InvoiceInfo) -> Result<()> {
let pub_key = pub_key.public_key().to_string();
let mut tx = self.conn.begin().await?;
let mut tx = self.conn_write.begin().await?;
sqlx::query(
"INSERT INTO invoice (pubkey, payment_hash, amount, status, description, created_at, invoice) VALUES ($1, $2, $3, $4, $5, now(), $6)",
@@ -658,7 +660,7 @@ ON CONFLICT (id) DO NOTHING"#,
let (pubkey, prev_invoice_status, amount) =
sqlx::query_as::<_, (String, InvoiceStatus, i64)>(query)
.bind(payment_hash)
.fetch_optional(&self.conn)
.fetch_optional(&self.conn_write)
.await?
.ok_or(error::Error::SqlxError(RowNotFound))?;
@@ -672,14 +674,14 @@ ON CONFLICT (id) DO NOTHING"#,
sqlx::query(query)
.bind(&status)
.bind(payment_hash)
.execute(&self.conn)
.execute(&self.conn_write)
.await?;
if prev_invoice_status.eq(&InvoiceStatus::Unpaid) && status.eq(&InvoiceStatus::Paid) {
sqlx::query("UPDATE account SET balance = balance + $1 WHERE pubkey = $2")
.bind(amount)
.bind(&pubkey)
.execute(&self.conn)
.execute(&self.conn_write)
.await?;
}
@@ -698,7 +700,7 @@ LIMIT 1;
"#;
match sqlx::query_as::<_, (i64, String, String, String)>(query)
.bind(pubkey.public_key().to_string())
.fetch_optional(&self.conn)
.fetch_optional(&self.conn_write)
.await
.unwrap()
{
@@ -902,7 +904,7 @@ fn query_from_filter(f: &ReqFilter) -> Option<QueryBuilder<Postgres>> {
}
push_and = true;
query
.push("e.created_at > ")
.push("e.created_at >= ")
.push_bind(Utc.timestamp_opt(f.since.unwrap() as i64, 0).unwrap());
}
@@ -913,7 +915,7 @@ fn query_from_filter(f: &ReqFilter) -> Option<QueryBuilder<Postgres>> {
}
push_and = true;
query
.push("e.created_at < ")
.push("e.created_at <= ")
.push_bind(Utc.timestamp_opt(f.until.unwrap() as i64, 0).unwrap());
}


@@ -123,7 +123,7 @@ CREATE TABLE "tag" (
CREATE INDEX tag_event_id_idx ON tag USING btree (event_id, name);
CREATE INDEX tag_value_idx ON tag USING btree (value);
-- NIP-05 Verfication table
-- NIP-05 Verification table
CREATE TABLE "user_verification" (
id int8 NOT NULL GENERATED BY DEFAULT AS IDENTITY,
event_id bytea NOT NULL,


@@ -62,7 +62,7 @@ impl SqliteRepo {
"writer",
settings,
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
1,
0,
2,
false,
);
@@ -70,7 +70,7 @@ impl SqliteRepo {
"maintenance",
settings,
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
1,
0,
2,
true,
);
@@ -842,7 +842,8 @@ impl NostrRepo for SqliteRepo {
async fn update_invoice(&self, payment_hash: &str, status: InvoiceStatus) -> Result<String> {
let mut conn = self.write_pool.get()?;
let payment_hash = payment_hash.to_owned();
let pub_key = tokio::task::spawn_blocking(move || {
tokio::task::spawn_blocking(move || {
let tx = conn.transaction()?;
let pubkey: String;
{
@@ -884,8 +885,7 @@ impl NostrRepo for SqliteRepo {
let ok: Result<String> = Ok(pubkey);
ok
})
.await?;
pub_key
.await?
}
/// Get the most recent invoice for a given pubkey
@@ -978,7 +978,7 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<Stri
return (empty_query, empty_params, None);
}
// check if the index needs to be overriden
// check if the index needs to be overridden
let idx_name = override_index(f);
let idx_stmt = idx_name
.as_ref()
@@ -1009,7 +1009,7 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<Stri
params.push(Box::new(lower));
}
None => {
info!("Could not parse hex range from author {:?}", auth);
trace!("Could not parse hex range from author {:?}", auth);
}
}
}
@@ -1080,18 +1080,18 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<Stri
ks.iter().map(std::string::ToString::to_string).collect();
kind_clause = format!("AND kind IN ({})", str_kinds.join(", "));
} else {
kind_clause = format!("");
kind_clause = String::new();
};
if f.since.is_some() {
since_clause = format!("AND created_at > {}", f.since.unwrap());
since_clause = format!("AND created_at >= {}", f.since.unwrap());
} else {
since_clause = format!("");
since_clause = String::new();
};
// Query for timestamp
if f.until.is_some() {
until_clause = format!("AND created_at < {}", f.until.unwrap());
until_clause = format!("AND created_at <= {}", f.until.unwrap());
} else {
until_clause = format!("");
until_clause = String::new();
};
let tag_clause = format!(
@@ -1107,12 +1107,12 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<Stri
}
// Query for timestamp
if f.since.is_some() {
let created_clause = format!("created_at > {}", f.since.unwrap());
let created_clause = format!("created_at >= {}", f.since.unwrap());
filter_components.push(created_clause);
}
// Query for timestamp
if f.until.is_some() {
let until_clause = format!("created_at < {}", f.until.unwrap());
let until_clause = format!("created_at <= {}", f.until.unwrap());
filter_components.push(until_clause);
}
// never display hidden events
@@ -1199,9 +1199,15 @@ pub fn build_pool(
.test_on_check_out(true) // no noticeable performance hit
.min_idle(Some(min_size))
.max_size(max_size)
.idle_timeout(Some(Duration::from_secs(10)))
.max_lifetime(Some(Duration::from_secs(30)))
.build(manager)
.unwrap();
// retrieve a connection to ensure the startup statements run immediately
{
let _ = pool.get();
}
info!(
"Built a connection pool {:?} (min={}, max={})",
name, min_size, max_size


@@ -19,7 +19,7 @@ PRAGMA foreign_keys = ON;
PRAGMA journal_size_limit = 32768;
PRAGMA temp_store = 2; -- use memory, not temp files
PRAGMA main.cache_size = 20000; -- 80MB max cache size per conn
pragma mmap_size = 17179869184; -- cap mmap at 16GB
pragma mmap_size = 0; -- disable mmap (default)
"##;
/// Latest database version


@@ -827,7 +827,7 @@ pub fn start_server(settings: &Settings, shutdown_rx: MpscReceiver<()>) -> Resul
info!("listening on: {}", socket_addr);
// all client-submitted valid events are broadcast to every
// other client on this channel. This should be large enough
// to accomodate slower readers (messages are dropped if
// to accommodate slower readers (messages are dropped if
// clients can not keep up).
let (bcast_tx, _) = broadcast::channel::<Event>(broadcast_buffer_limit);
// validated events that need to be persisted are sent to the
@@ -1125,8 +1125,8 @@ async fn nostr_server(
let unspec = "<unspecified>".to_string();
info!("new client connection (cid: {}, ip: {:?})", cid, conn.ip());
let origin = client_info.origin.as_ref().unwrap_or_else(|| &unspec);
let user_agent = client_info.user_agent.as_ref().unwrap_or_else(|| &unspec);
let origin = client_info.origin.as_ref().unwrap_or(&unspec);
let user_agent = client_info.user_agent.as_ref().unwrap_or(&unspec);
info!(
"cid: {}, origin: {:?}, user-agent: {:?}",
cid, origin, user_agent
@@ -1175,14 +1175,12 @@ async fn nostr_server(
if query_result.event == "EOSE" {
let send_str = format!("[\"EOSE\",\"{subesc}\"]");
ws_stream.send(Message::Text(send_str)).await.ok();
} else {
if allowed_to_send(&query_result.event, &conn, &settings) {
metrics.sent_events.with_label_values(&["db"]).inc();
client_received_event_count += 1;
// send a result
let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
ws_stream.send(Message::Text(send_str)).await.ok();
}
} else if allowed_to_send(&query_result.event, &conn, &settings) {
metrics.sent_events.with_label_values(&["db"]).inc();
client_received_event_count += 1;
// send a result
let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
ws_stream.send(Message::Text(send_str)).await.ok();
}
},
// TODO: consider logging the LaggedRecv error
@@ -1278,7 +1276,7 @@ async fn nostr_server(
// check if the event is too far in the future.
} else if e.is_valid_timestamp(settings.options.reject_future_seconds) {
// Write this to the database.
let auth_pubkey = conn.auth_pubkey().and_then(|pubkey| hex::decode(&pubkey).ok());
let auth_pubkey = conn.auth_pubkey().and_then(|pubkey| hex::decode(pubkey).ok());
let submit_event = SubmittedEvent {
event: e.clone(),
notice_tx: notice_tx.clone(),
@@ -1307,7 +1305,7 @@ async fn nostr_server(
error!("AUTH command received, but relay_url is not set in the config file (cid: {})", cid);
},
Some(relay) => {
match conn.authenticate(&event, &relay) {
match conn.authenticate(&event, relay) {
Ok(_) => {
let pubkey = match conn.auth_pubkey() {
Some(k) => k.chars().take(8).collect(),
@@ -1317,7 +1315,7 @@ async fn nostr_server(
},
Err(e) => {
info!("authentication error: {} (cid: {})", e, cid);
ws_stream.send(make_notice_message(&Notice::message(format!("Authentication error: {e}")))).await.ok();
ws_stream.send(make_notice_message(&Notice::restricted(event.id, format!("authentication error: {e}").as_str()))).await.ok();
},
}
}


@@ -188,7 +188,7 @@ impl<'de> Deserialize<'de> for Subscription {
D: Deserializer<'de>,
{
let mut v: Value = Deserialize::deserialize(deserializer)?;
// this shoud be a 3-or-more element array.
// this should be a 3-or-more element array.
// verify the first element is a String, REQ
// get the subscription from the second element.
// convert each of the remaining objects into filters
@@ -319,8 +319,8 @@ impl ReqFilter {
pub fn interested_in_event(&self, event: &Event) -> bool {
// self.id.as_ref().map(|v| v == &event.id).unwrap_or(true)
self.ids_match(event)
&& self.since.map_or(true, |t| event.created_at > t)
&& self.until.map_or(true, |t| event.created_at < t)
&& self.since.map_or(true, |t| event.created_at >= t)
&& self.until.map_or(true, |t| event.created_at <= t)
&& self.kind_match(event.kind)
&& (self.authors_match(event) || self.delegated_authors_match(event))
&& self.tag_match(event)


@@ -50,15 +50,15 @@ mod tests {
#[test]
fn lower_hex() {
let hexstr = "abcd0123";
assert_eq!(is_lower_hex(hexstr), true);
assert!(is_lower_hex(hexstr));
}
#[test]
fn nip19() {
let hexkey = "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d";
let nip19key = "npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6";
assert_eq!(is_nip19(hexkey), false);
assert_eq!(is_nip19(nip19key), true);
assert!(!is_nip19(hexkey));
assert!(is_nip19(nip19key));
}
#[test]


@@ -334,9 +334,9 @@ mod tests {
id: "0".to_owned(),
pubkey: public_key.to_hex(),
delegated_by: None,
created_at: created_at,
kind: kind,
tags: tags,
created_at,
kind,
tags,
content: "".to_owned(),
sig: "0".to_owned(),
tagidx: None,


@@ -1,8 +1,10 @@
use anyhow::Result;
use futures::SinkExt;
use futures::StreamExt;
use std::thread;
use std::time::Duration;
use tokio_tungstenite::connect_async;
use tracing::info;
mod common;
#[tokio::test]
@@ -45,3 +47,33 @@ async fn relay_home_page() -> Result<()> {
let _res = relay.shutdown_tx.send(());
Ok(())
}
//#[tokio::test]
// Still inwork
async fn publish_test() -> Result<()> {
// get a relay and wait for startup
let relay = common::start_relay()?;
common::wait_for_healthy_relay(&relay).await?;
// open a non-secure websocket connection.
let (mut ws, _res) = connect_async(format!("ws://localhost:{}", relay.port)).await?;
// send a simple pre-made message
let simple_event = r#"["EVENT", {"content": "hello world","created_at": 1691239763,
"id":"f3ce6798d70e358213ebbeba4886bbdfacf1ecfd4f65ee5323ef5f404de32b86",
"kind": 1,
"pubkey": "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798",
"sig": "30ca29e8581eeee75bf838171dec818af5e6de2b74f5337de940f5cc91186534c0b20d6cf7ad1043a2c51dbd60b979447720a471d346322103c83f6cb66e4e98",
"tags": []}]"#;
ws.send(simple_event.into()).await?;
// get response from server, confirm it is an array with first element "OK"
let event_confirm = ws.next().await;
ws.close(None).await?;
info!("event confirmed: {:?}", event_confirm);
// open a new connection, and wait for some time to get the event.
let (mut sub_ws, _res) = connect_async(format!("ws://localhost:{}", relay.port)).await?;
let event_sub = r#"["REQ", "simple", {}]"#;
sub_ws.send(event_sub.into()).await?;
// read from subscription
let ws_next = sub_ws.next().await;
let _res = relay.shutdown_tx.send(());
Ok(())
}