Mirror of https://github.com/scsibug/nostr-rs-relay.git, synced 2025-09-01 03:40:46 -04:00
Compare commits
29 Commits
f4ecd43708
a8f465fdc8
1c14adc766
e894a86566
bedc378624
e1c2a6b758
990bb656e8
168cfc3b26
a36ad378f6
538d139ebf
23f7730fea
8aa1256254
9ed3391b46
4ad483090e
9b351aab9b
597749890e
1d499cf12b
ed3a6b9692
048199e30b
414e83f696
225c8f762e
887fc28ab2
294d3b99c3
53990672ae
9c1b21cbfe
2f63417646
3b25160852
34ad549cde
f8b1fe5035
Cargo.lock (785 changes, generated)
File diff suppressed because it is too large.
Cargo.toml (10 changes)
@@ -1,6 +1,6 @@
 [package]
 name = "nostr-rs-relay"
-version = "0.5.1"
+version = "0.6.1"
 edition = "2021"
 
 [dependencies]
@@ -9,12 +9,12 @@ env_logger = "^0.9"
 tokio = { version = "^1.16", features = ["full"] }
 futures = "^0.3"
 futures-util = "^0.3"
-tokio-tungstenite = "^0.16"
-tungstenite = "^0.16"
+tokio-tungstenite = "^0.17"
+tungstenite = "^0.17"
 thiserror = "^1"
 uuid = { version = "^0.8", features = ["v4"] }
-config = { version = "0.11", features = ["toml"] }
-bitcoin_hashes = { version = "^0.9", features = ["serde"] }
+config = { version = "^0.12", features = ["toml"] }
+bitcoin_hashes = { version = "^0.10", features = ["serde"] }
 secp256k1 = {version = "^0.21", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] }
 serde = { version = "^1.0", features = ["derive"] }
 serde_json = {version = "^1.0", features = ["preserve_order"]}
Dockerfile
@@ -1,4 +1,4 @@
-FROM rust:1.58.1 as builder
+FROM docker.io/library/rust:1.62.0@sha256:8220e4fbb22a07b78e6472cdf8f5fb8913a04974c26b130177b73a8a64334541 as builder
 
 RUN USER=root cargo new --bin nostr-rs-relay
 WORKDIR ./nostr-rs-relay
@@ -12,7 +12,7 @@ COPY ./src ./src
 RUN rm ./target/release/deps/nostr*relay*
 RUN cargo build --release
 
-FROM debian:bullseye-20220125-slim
+FROM docker.io/library/debian:bullseye-20220622-slim@sha256:89e9d812b34f393bddc3ff289f0036a3d9efc7e2f7289ec902c6427b69f39649
 ARG APP=/usr/src/app
 ARG APP_DATA=/usr/src/app/db
 RUN apt-get update \
README.md (10 changes)
@@ -18,9 +18,11 @@ NIPs with a relay-specific implementation are listed here.
 - [x] NIP-02: Hide old contact list events
 - [ ] NIP-03: OpenTimestamps
 - [x] NIP-05: Mapping Nostr keys to DNS identifiers
-- [ ] NIP-09: Event deletion
+- [x] NIP-09: Event deletion
 - [x] NIP-11: Relay information document
 - [x] NIP-12: Generic tag search (_experimental_)
+- [x] NIP-15: End of stored events notice
+- [x] NIP-16: Replaceable and ephemeral events
 
 ## Quick Start
 
@@ -79,8 +81,10 @@ termination, load balancing, and other features), see [Reverse
 Proxy](reverse-proxy.md).
 
+## Dev Channel
+
+The current dev discussions for this project are happening at https://discord.gg/ufG6fH52Vk.
+Drop in to ask any development-related questions.
 
 For development discussions, please feel free to use the [sourcehut
 mailing list](https://lists.sr.ht/~gheartsfield/nostr-rs-relay-devel).
 Or, drop by the [Nostr Telegram Channel](https://t.me/nostr_protocol).
 
 License
 ---
config.toml
@@ -63,8 +63,8 @@ reject_future_seconds = 1800
 #broadcast_buffer = 16384
 
 # Event persistence buffer size, in number of events. This provides
-# backpressure to senders if writes are slow. Defaults to 16.
-#event_persist_buffer = 16
+# backpressure to senders if writes are slow.
+#event_persist_buffer = 4096
 
 [authorization]
 # Pubkey addresses in this array are whitelisted for event publishing.
src/config.rs
@@ -1,4 +1,5 @@
 //! Configuration file and settings management
+use config::{Config, ConfigError, File};
 use lazy_static::lazy_static;
 use log::*;
 use serde::{Deserialize, Serialize};
@@ -138,27 +139,29 @@ pub struct Settings {
 
 impl Settings {
     pub fn new() -> Self {
-        let d = Self::default();
+        let default_settings = Self::default();
         // attempt to construct settings with file
-        // Self::new_from_default(&d).unwrap_or(d)
-        let from_file = Self::new_from_default(&d);
+        let from_file = Self::new_from_default(&default_settings);
         match from_file {
             Ok(f) => f,
             Err(e) => {
                 warn!("Error reading config file ({:?})", e);
-                d
+                default_settings
             }
         }
     }
 
-    fn new_from_default(default: &Settings) -> Result<Self, config::ConfigError> {
-        let config: config::Config = config::Config::new();
-        let mut settings: Settings = config
+    fn new_from_default(default: &Settings) -> Result<Self, ConfigError> {
+        let builder = Config::builder();
+        let config: Config = builder
             // use defaults
-            .with_merged(config::Config::try_from(default).unwrap())?
+            .add_source(Config::try_from(default)?)
             // override with file contents
-            .with_merged(config::File::with_name("config"))?
-            .try_into()?;
+            .add_source(File::with_name("config"))
+            .build()?;
+        let mut settings: Settings = config.try_deserialize()?;
         // ensure connection pool size is logical
         if settings.database.min_conn > settings.database.max_conn {
             panic!(
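The config crate's 0.11 to 0.12 upgrade replaces the mutable merge API (with_merged) with a builder. A minimal standalone sketch of the new pattern, using an illustrative settings struct rather than the relay's real Settings:

use config::{Config, ConfigError, File};
use serde::{Deserialize, Serialize};

// Illustrative struct; the relay's Settings has many more fields.
#[derive(Debug, Default, Serialize, Deserialize)]
struct AppSettings {
    port: Option<u16>,
}

fn load() -> Result<AppSettings, ConfigError> {
    Config::builder()
        // layer sources in precedence order: defaults first, then the file
        .add_source(Config::try_from(&AppSettings::default())?)
        .add_source(File::with_name("config"))
        .build()?
        // config 0.12 deserializes via try_deserialize instead of try_into
        .try_deserialize()
}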
src/db.rs (410 changes)
@@ -7,6 +7,7 @@ use crate::hexrange::hex_range;
 use crate::hexrange::HexSearch;
 use crate::nip05;
 use crate::schema::{upgrade_db, STARTUP_SQL};
+use crate::subscription::ReqFilter;
 use crate::subscription::Subscription;
 use crate::utils::is_hex;
 use governor::clock::Clock;
@@ -207,30 +208,41 @@ pub async fn db_writer(
         }
         // TODO: cache recent list of authors to remove a DB call.
         let start = Instant::now();
-        match write_event(&mut pool.get()?, &event) {
-            Ok(updated) => {
-                if updated == 0 {
-                    trace!("ignoring duplicate event");
-                } else {
-                    info!(
-                        "persisted event {:?} from {:?} in {:?}",
-                        event.get_event_id_prefix(),
-                        event.get_author_prefix(),
-                        start.elapsed()
-                    );
-                    event_write = true;
-                    // send this out to all clients
-                    bcast_tx.send(event.clone()).ok();
+        if event.kind >= 20000 && event.kind < 30000 {
+            info!(
+                "published ephemeral event {:?} from {:?} in {:?}",
+                event.get_event_id_prefix(),
+                event.get_author_prefix(),
+                start.elapsed()
+            );
+            bcast_tx.send(event.clone()).ok();
+            event_write = true
+        } else {
+            match write_event(&mut pool.get()?, &event) {
+                Ok(updated) => {
+                    if updated == 0 {
+                        trace!("ignoring duplicate event");
+                    } else {
+                        info!(
+                            "persisted event {:?} from {:?} in {:?}",
+                            event.get_event_id_prefix(),
+                            event.get_author_prefix(),
+                            start.elapsed()
+                        );
+                        event_write = true;
+                        // send this out to all clients
+                        bcast_tx.send(event.clone()).ok();
+                    }
                 }
-            }
-            Err(err) => {
-                warn!("event insert failed: {:?}", err);
-                notice_tx
-                    .try_send(
-                        "relay experienced an error trying to publish the latest event"
-                            .to_owned(),
-                    )
-                    .ok();
+                Err(err) => {
+                    warn!("event insert failed: {:?}", err);
+                    notice_tx
+                        .try_send(
+                            "relay experienced an error trying to publish the latest event"
+                                .to_owned(),
+                        )
+                        .ok();
+                }
             }
         }
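Per NIP-16, ephemeral events occupy the kind range 20000 to 29999 and are broadcast to subscribers without ever touching the database. A small sketch of the classification implied by the branch above (helper names are illustrative, not from the relay source):

// Hypothetical helpers restating the kind ranges used in db_writer above.
fn is_ephemeral(kind: u64) -> bool {
    // ephemeral events: broadcast only, never persisted
    (20000..30000).contains(&kind)
}

fn is_replaceable(kind: u64) -> bool {
    // replaceable events: a newer event hides older ones of the same kind/author
    kind == 0 || kind == 3 || (10000..20000).contains(&kind)
}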
@@ -302,35 +314,45 @@ pub fn write_event(conn: &mut PooledConnection, e: &Event) -> Result<usize> {
             }
         }
     }
-    // if this event is for a metadata update, hide every other kind=0
-    // event from the same author that was issued earlier than this.
-    if e.kind == 0 {
+    // if this event is replaceable update, hide every other replaceable
+    // event with the same kind from the same author that was issued
+    // earlier than this.
+    if e.kind == 0 || e.kind == 3 || (e.kind >= 10000 && e.kind < 20000) {
         let update_count = tx.execute(
-            "UPDATE event SET hidden=TRUE WHERE id!=? AND kind=0 AND author=? AND created_at <= ? and hidden!=TRUE",
-            params![ev_id, hex::decode(&e.pubkey).ok(), e.created_at],
+            "UPDATE event SET hidden=TRUE WHERE id!=? AND kind=? AND author=? AND created_at <= ? and hidden!=TRUE",
+            params![ev_id, e.kind, hex::decode(&e.pubkey).ok(), e.created_at],
         )?;
         if update_count > 0 {
             info!(
-                "hid {} older metadata events for author {:?}",
+                "hid {} older replaceable kind {} events for author {:?}",
                 update_count,
+                e.kind,
                 e.get_author_prefix()
             );
         }
     }
-    // if this event is for a contact update, hide every other kind=3
-    // event from the same author that was issued earlier than this.
-    if e.kind == 3 {
-        let update_count = tx.execute(
-            "UPDATE event SET hidden=TRUE WHERE id!=? AND kind=3 AND author=? AND created_at <= ? and hidden!=TRUE",
-            params![ev_id, hex::decode(&e.pubkey).ok(), e.created_at],
-        )?;
-        if update_count > 0 {
-            info!(
-                "hid {} older contact events for author {:?}",
-                update_count,
-                e.get_author_prefix()
-            );
-        }
-    }
+    // if this event is a deletion, hide the referenced events from the same author.
+    if e.kind == 5 {
+        let event_candidates = e.tag_values_by_name("e");
+        let mut params: Vec<Box<dyn ToSql>> = vec![];
+        // first parameter will be author
+        params.push(Box::new(hex::decode(&e.pubkey)?));
+        event_candidates
+            .iter()
+            .filter(|x| is_hex(x) && x.len() == 64)
+            .filter_map(|x| hex::decode(x).ok())
+            .for_each(|x| params.push(Box::new(x)));
+        let query = format!(
+            "UPDATE event SET hidden=TRUE WHERE author=? AND event_hash IN ({})",
+            repeat_vars(params.len() - 1)
+        );
+        let mut stmt = tx.prepare(&query)?;
+        let update_count = stmt.execute(rusqlite::params_from_iter(params))?;
+        info!(
+            "hid {} deleted events for author {:?}",
+            update_count,
+            e.get_author_prefix()
+        );
+    }
     tx.commit()?;
     Ok(ins_count)
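The deletion branch builds its IN (...) clause with the repeat_vars helper whose tail is visible as context in the next hunk. A minimal sketch of what such a helper does, following the pattern documented for rusqlite and assuming it matches the relay's version:

// Produce "?,?,...,?" with `count` placeholders for an IN (...) clause.
fn repeat_vars(count: usize) -> String {
    if count == 0 {
        return "".to_owned();
    }
    let mut s = "?,".repeat(count);
    s.pop(); // remove the trailing comma
    s
}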
@@ -356,142 +378,156 @@ fn repeat_vars(count: usize) -> String {
     s
 }
 
-/// Create a dynamic SQL query string and params from a subscription.
-fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
+/// Create a dynamic SQL subquery and params from a subscription filter.
+fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>) {
     // build a dynamic SQL query.  all user-input is either an integer
     // (sqli-safe), or a string that is filtered to only contain
     // hexadecimal characters.  Strings that require escaping (tag
     // names/values) use parameters.
     let mut query =
-        "SELECT DISTINCT(e.content) FROM event e LEFT JOIN tag t ON e.id=t.event_id ".to_owned();
-    // parameters
+        "SELECT DISTINCT(e.content), e.created_at FROM event e LEFT JOIN tag t ON e.id=t.event_id "
+            .to_owned();
+    // query parameters for SQLite
     let mut params: Vec<Box<dyn ToSql>> = vec![];
 
-    // for every filter in the subscription, generate a where clause
-    let mut filter_clauses: Vec<String> = Vec::new();
-    for f in sub.filters.iter() {
-        // individual filter components
-        let mut filter_components: Vec<String> = Vec::new();
-        // Query for "authors", allowing prefix matches
-        if let Some(authvec) = &f.authors {
-            // take each author and convert to a hexsearch
-            let mut auth_searches: Vec<String> = vec![];
-            for auth in authvec {
-                match hex_range(auth) {
-                    Some(HexSearch::Exact(ex)) => {
-                        auth_searches.push("author=?".to_owned());
-                        params.push(Box::new(ex));
-                    }
-                    Some(HexSearch::Range(lower, upper)) => {
-                        auth_searches.push("(author>? AND author<?)".to_owned());
-                        params.push(Box::new(lower));
-                        params.push(Box::new(upper));
-                    }
-                    Some(HexSearch::LowerOnly(lower)) => {
-                        auth_searches.push("author>?".to_owned());
-                        params.push(Box::new(lower));
-                    }
-                    None => {
-                        info!("Could not parse hex range from author {:?}", auth);
-                    }
+    // individual filter components (single conditions such as an author or event ID)
+    let mut filter_components: Vec<String> = Vec::new();
+    // Query for "authors", allowing prefix matches
+    if let Some(authvec) = &f.authors {
+        // take each author and convert to a hexsearch
+        let mut auth_searches: Vec<String> = vec![];
+        for auth in authvec {
+            match hex_range(auth) {
+                Some(HexSearch::Exact(ex)) => {
+                    auth_searches.push("author=?".to_owned());
+                    params.push(Box::new(ex));
+                }
+                Some(HexSearch::Range(lower, upper)) => {
+                    auth_searches.push("(author>? AND author<?)".to_owned());
+                    params.push(Box::new(lower));
+                    params.push(Box::new(upper));
+                }
+                Some(HexSearch::LowerOnly(lower)) => {
+                    auth_searches.push("author>?".to_owned());
+                    params.push(Box::new(lower));
+                }
+                None => {
+                    info!("Could not parse hex range from author {:?}", auth);
                 }
             }
-            let authors_clause = format!("({})", auth_searches.join(" OR "));
-            filter_components.push(authors_clause);
         }
-        // Query for Kind
-        if let Some(ks) = &f.kinds {
-            // kind is number, no escaping needed
-            let str_kinds: Vec<String> = ks.iter().map(|x| x.to_string()).collect();
-            let kind_clause = format!("kind IN ({})", str_kinds.join(", "));
-            filter_components.push(kind_clause);
-        }
-        // Query for event, allowing prefix matches
-        if let Some(idvec) = &f.ids {
-            // take each author and convert to a hexsearch
-            let mut id_searches: Vec<String> = vec![];
-            for id in idvec {
-                match hex_range(id) {
-                    Some(HexSearch::Exact(ex)) => {
-                        id_searches.push("event_hash=?".to_owned());
-                        params.push(Box::new(ex));
-                    }
-                    Some(HexSearch::Range(lower, upper)) => {
-                        id_searches.push("(event_hash>? AND event_hash<?)".to_owned());
-                        params.push(Box::new(lower));
-                        params.push(Box::new(upper));
-                    }
-                    Some(HexSearch::LowerOnly(lower)) => {
-                        id_searches.push("event_hash>?".to_owned());
-                        params.push(Box::new(lower));
-                    }
-                    None => {
-                        info!("Could not parse hex range from id {:?}", id);
-                    }
+        let authors_clause = format!("({})", auth_searches.join(" OR "));
+        filter_components.push(authors_clause);
+    }
+    // Query for Kind
+    if let Some(ks) = &f.kinds {
+        // kind is number, no escaping needed
+        let str_kinds: Vec<String> = ks.iter().map(|x| x.to_string()).collect();
+        let kind_clause = format!("kind IN ({})", str_kinds.join(", "));
+        filter_components.push(kind_clause);
+    }
+    // Query for event, allowing prefix matches
+    if let Some(idvec) = &f.ids {
+        // take each author and convert to a hexsearch
+        let mut id_searches: Vec<String> = vec![];
+        for id in idvec {
+            match hex_range(id) {
+                Some(HexSearch::Exact(ex)) => {
+                    id_searches.push("event_hash=?".to_owned());
+                    params.push(Box::new(ex));
+                }
+                Some(HexSearch::Range(lower, upper)) => {
+                    id_searches.push("(event_hash>? AND event_hash<?)".to_owned());
+                    params.push(Box::new(lower));
+                    params.push(Box::new(upper));
+                }
+                Some(HexSearch::LowerOnly(lower)) => {
+                    id_searches.push("event_hash>?".to_owned());
+                    params.push(Box::new(lower));
+                }
+                None => {
+                    info!("Could not parse hex range from id {:?}", id);
                 }
             }
-            let id_clause = format!("({})", id_searches.join(" OR "));
-            filter_components.push(id_clause);
         }
-        // Query for tags
-        if let Some(map) = &f.tags {
-            for (key, val) in map.iter() {
-                let mut str_vals: Vec<Box<dyn ToSql>> = vec![];
-                let mut blob_vals: Vec<Box<dyn ToSql>> = vec![];
-                for v in val {
-                    if is_hex(v) {
-                        if let Ok(h) = hex::decode(&v) {
-                            blob_vals.push(Box::new(h));
-                        }
-                    } else {
-                        str_vals.push(Box::new(v.to_owned()));
+        let id_clause = format!("({})", id_searches.join(" OR "));
+        filter_components.push(id_clause);
+    }
+    // Query for tags
+    if let Some(map) = &f.tags {
+        for (key, val) in map.iter() {
+            let mut str_vals: Vec<Box<dyn ToSql>> = vec![];
+            let mut blob_vals: Vec<Box<dyn ToSql>> = vec![];
+            for v in val {
+                if is_hex(v) {
+                    if let Ok(h) = hex::decode(&v) {
+                        blob_vals.push(Box::new(h));
                     }
+                } else {
+                    str_vals.push(Box::new(v.to_owned()));
                 }
-                // create clauses with "?" params for each tag value being searched
-                let str_clause = format!("value IN ({})", repeat_vars(str_vals.len()));
-                let blob_clause = format!("value_hex IN ({})", repeat_vars(blob_vals.len()));
-                let tag_clause = format!("(name=? AND ({} OR {}))", str_clause, blob_clause);
-                // add the tag name as the first parameter
-                params.push(Box::new(key.to_owned()));
-                // add all tag values that are plain strings as params
-                params.append(&mut str_vals);
-                // add all tag values that are blobs as params
-                params.append(&mut blob_vals);
-                filter_components.push(tag_clause);
             }
-        }
-        // Query for timestamp
-        if f.since.is_some() {
-            let created_clause = format!("created_at > {}", f.since.unwrap());
-            filter_components.push(created_clause);
-        }
-        // Query for timestamp
-        if f.until.is_some() {
-            let until_clause = format!("created_at < {}", f.until.unwrap());
-            filter_components.push(until_clause);
-        }
-
-        // combine all clauses, and add to filter_clauses
-        if !filter_components.is_empty() {
-            let mut fc = "( ".to_owned();
-            fc.push_str(&filter_components.join(" AND "));
-            fc.push_str(" )");
-            filter_clauses.push(fc);
+            // create clauses with "?" params for each tag value being searched
+            let str_clause = format!("value IN ({})", repeat_vars(str_vals.len()));
+            let blob_clause = format!("value_hex IN ({})", repeat_vars(blob_vals.len()));
+            let tag_clause = format!("(name=? AND ({} OR {}))", str_clause, blob_clause);
+            // add the tag name as the first parameter
+            params.push(Box::new(key.to_owned()));
+            // add all tag values that are plain strings as params
+            params.append(&mut str_vals);
+            // add all tag values that are blobs as params
+            params.append(&mut blob_vals);
+            filter_components.push(tag_clause);
         }
     }
+    // Query for timestamp
+    if f.since.is_some() {
+        let created_clause = format!("created_at > {}", f.since.unwrap());
+        filter_components.push(created_clause);
+    }
+    // Query for timestamp
+    if f.until.is_some() {
+        let until_clause = format!("created_at < {}", f.until.unwrap());
+        filter_components.push(until_clause);
+    }
     // never display hidden events
-    query.push_str(" WHERE hidden!=TRUE ");
-
-    // combine all filters with OR clauses, if any exist
-    if !filter_clauses.is_empty() {
-        query.push_str(" AND (");
-        query.push_str(&filter_clauses.join(" OR "));
-        query.push_str(") ");
+    query.push_str(" WHERE hidden!=TRUE");
+    // build filter component conditions
+    if !filter_components.is_empty() {
+        query.push_str(" AND ");
+        query.push_str(&filter_components.join(" AND "));
     }
-    // add order clause
-    query.push_str(" ORDER BY created_at ASC");
-    debug!("query string: {}", query);
+    // Apply per-filter limit to this subquery.
+    // The use of a LIMIT implies a DESC order, to capture only the most recent events.
+    if let Some(lim) = f.limit {
+        query.push_str(&format!(" ORDER BY e.created_at DESC LIMIT {}", lim))
+    } else {
+        query.push_str(" ORDER BY e.created_at ASC")
+    }
     (query, params)
 }
+
+/// Create a dynamic SQL query string and params from a subscription.
+fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
+    // build a dynamic SQL query for an entire subscription, based on
+    // SQL subqueries for filters.
+    let mut subqueries: Vec<String> = Vec::new();
+    // subquery params
+    let mut params: Vec<Box<dyn ToSql>> = vec![];
+    // for every filter in the subscription, generate a subquery
+    for f in sub.filters.iter() {
+        let (f_subquery, mut f_params) = query_from_filter(&f);
+        subqueries.push(f_subquery);
+        params.append(&mut f_params);
+    }
+    // encapsulate subqueries into select statements
+    let subqueries_selects: Vec<String> = subqueries
+        .iter()
+        .map(|s| {
+            return format!("SELECT content, created_at FROM ({})", s);
+        })
+        .collect();
+    let query: String = subqueries_selects.join(" UNION ");
+    info!("final query string: {}", query);
+    (query, params)
+}
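To make the refactor concrete: each filter now becomes its own subquery, so a per-filter LIMIT can apply before the results of all filters are merged with UNION. An illustrative sketch of the final SQL shape for a two-filter REQ (simplified, not actual relay output):

fn example_union_query() -> String {
    // filter 1: the 10 most recent kind-1 events (a LIMIT implies DESC order)
    let f1 = "SELECT DISTINCT(e.content), e.created_at FROM event e \
              LEFT JOIN tag t ON e.id=t.event_id \
              WHERE hidden!=TRUE AND kind IN (1) \
              ORDER BY e.created_at DESC LIMIT 10";
    // filter 2: all events by one author, oldest first
    let f2 = "SELECT DISTINCT(e.content), e.created_at FROM event e \
              LEFT JOIN tag t ON e.id=t.event_id \
              WHERE hidden!=TRUE AND (author=?) \
              ORDER BY e.created_at ASC";
    // each subquery is wrapped in a SELECT, then merged with UNION
    format!(
        "SELECT content, created_at FROM ({}) UNION SELECT content, created_at FROM ({})",
        f1, f2
    )
}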
@@ -503,7 +539,7 @@ fn query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>) {
 /// query is immediately aborted.
 pub async fn db_query(
     sub: Subscription,
-    conn: PooledConnection,
+    pool: SqlitePool,
     query_tx: tokio::sync::mpsc::Sender<QueryResult>,
     mut abandon_query_rx: tokio::sync::oneshot::Receiver<()>,
 ) {
@@ -513,29 +549,49 @@ pub async fn db_query(
         let start = Instant::now();
         // generate SQL query
         let (q, p) = query_from_sub(&sub);
-        // execute the query.  Don't cache, since queries vary so much.
-        let mut stmt = conn.prepare(&q)?;
-        let mut event_rows = stmt.query(rusqlite::params_from_iter(p))?;
-        while let Some(row) = event_rows.next()? {
-            // check if this is still active (we could do this every N rows)
-            if abandon_query_rx.try_recv().is_ok() {
-                debug!("query aborted");
-                return Ok(());
+        debug!("SQL generated in {:?}", start.elapsed());
+        // show pool stats
+        debug!("DB pool stats: {:?}", pool.state());
+        let start = Instant::now();
+        if let Ok(conn) = pool.get() {
+            // execute the query.  Don't cache, since queries vary so much.
+            let mut stmt = conn.prepare(&q)?;
+            let mut event_rows = stmt.query(rusqlite::params_from_iter(p))?;
+            let mut first_result = true;
+            while let Some(row) = event_rows.next()? {
+                if first_result {
+                    debug!("time to first result: {:?}", start.elapsed());
+                    first_result = false;
+                }
+                // check if this is still active
+                // TODO: check every N rows
+                if abandon_query_rx.try_recv().is_ok() {
+                    debug!("query aborted");
+                    return Ok(());
+                }
+                row_count += 1;
+                let event_json = row.get(0)?;
+                query_tx
+                    .blocking_send(QueryResult {
+                        sub_id: sub.get_id(),
+                        event: event_json,
+                    })
+                    .ok();
             }
-            row_count += 1;
-            let event_json = row.get(0)?;
             query_tx
                 .blocking_send(QueryResult {
                     sub_id: sub.get_id(),
-                    event: event_json,
+                    event: "EOSE".to_string(),
                 })
                 .ok();
+            debug!(
+                "query completed ({} rows) in {:?}",
+                row_count,
+                start.elapsed()
+            );
+        } else {
+            warn!("Could not get a database connection for querying");
         }
-        debug!(
-            "query completed ({} rows) in {:?}",
-            row_count,
-            start.elapsed()
-        );
         let ok: Result<()> = Ok(());
         ok
     });
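The EOSE handling above uses an in-band sentinel: a QueryResult whose event field is the literal string "EOSE" marks the end of stored events for a subscription. A sketch of the shape this diff implies (field comments added; not copied from the source):

pub struct QueryResult {
    /// subscription id the result belongs to
    pub sub_id: String,
    /// serialized event JSON, or the literal "EOSE" end-of-stored-events sentinel
    pub event: String,
}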
src/error.rs
@@ -48,6 +48,8 @@ pub enum Error {
     JoinError,
     #[error("Hyper Client error")]
     HyperError(hyper::Error),
+    #[error("Hex encoding error")]
+    HexError(hex::FromHexError),
     #[error("Unknown/Undocumented")]
     UnknownError,
 }
@@ -58,6 +60,12 @@ pub enum Error {
 // }
 //}
 
+impl From<hex::FromHexError> for Error {
+    fn from(h: hex::FromHexError) -> Self {
+        Error::HexError(h)
+    }
+}
+
 impl From<hyper::Error> for Error {
     fn from(h: hyper::Error) -> Self {
         Error::HyperError(h)
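For reference, the same conversion could also be derived with thiserror's #[from] attribute instead of a hand-written impl; a minimal sketch of that alternative (not how the relay does it):

use thiserror::Error;

#[derive(Error, Debug)]
pub enum Error {
    // #[from] generates the From<hex::FromHexError> impl automatically
    #[error("Hex encoding error")]
    HexError(#[from] hex::FromHexError),
}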
src/event.rs (42 changes)
@@ -124,6 +124,16 @@ impl Event {
         self.pubkey.chars().take(8).collect()
     }
 
+    /// Retrieve tag values
+    pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
+        self.tags
+            .iter()
+            .filter(|x| x.len() > 1)
+            .filter(|x| x.get(0).unwrap() == tag_name)
+            .map(|x| x.get(1).unwrap().to_owned())
+            .collect()
+    }
+
     /// Check if this event has a valid signature.
     fn is_valid(&self) -> bool {
         // TODO: return a Result with a reason for invalid events
@@ -136,7 +146,7 @@ impl Event {
         if curr_time + (allowable_future as u64) < self.created_at {
             let delta = self.created_at - curr_time;
             debug!(
-                "Event is too far in the future ({} seconds), rejecting",
+                "event is too far in the future ({} seconds), rejecting",
                 delta
             );
             return false;
@@ -169,11 +179,11 @@ impl Event {
             let verify = SECP.verify_schnorr(&sig, &msg, &pubkey);
             matches!(verify, Ok(()))
         } else {
-            debug!("Client sent malformed pubkey");
+            debug!("client sent malformed pubkey");
             false
         }
     } else {
-        info!("Error converting digest to secp256k1 message");
+        info!("error converting digest to secp256k1 message");
         false
     }
 }
@@ -332,6 +342,32 @@ mod tests {
         assert_eq!(c, expected);
     }
 
+    #[test]
+    fn event_tag_select() {
+        let e = Event {
+            id: "999".to_owned(),
+            pubkey: "012345".to_owned(),
+            created_at: 501234,
+            kind: 1,
+            tags: vec![
+                vec!["j".to_owned(), "abc".to_owned()],
+                vec!["e".to_owned(), "foo".to_owned()],
+                vec!["e".to_owned(), "bar".to_owned()],
+                vec!["e".to_owned(), "baz".to_owned()],
+                vec![
+                    "p".to_owned(),
+                    "aaaa".to_owned(),
+                    "ws://example.com".to_owned(),
+                ],
+            ],
+            content: "this is a test".to_owned(),
+            sig: "abcde".to_owned(),
+            tagidx: None,
+        };
+        let v = e.tag_values_by_name("e");
+        assert_eq!(v, vec!["foo", "bar", "baz"]);
+    }
+
     #[test]
     fn event_canonical_with_tags() {
         let e = Event {
src/hexrange.rs
@@ -80,6 +80,7 @@ pub fn hex_range(s: &str) -> Option<HexSearch> {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use crate::error::Result;
 
     #[test]
     fn hex_range_exact() -> Result<()> {
src/info.rs
@@ -35,7 +35,7 @@ impl From<config::Info> for RelayInfo {
             description: i.description,
             pubkey: i.pubkey,
             contact: i.contact,
-            supported_nips: Some(vec![1, 2, 11]),
+            supported_nips: Some(vec![1, 2, 11, 15, 16]),
             software: Some("https://git.sr.ht/~gheartsfield/nostr-rs-relay".to_owned()),
             version: CARGO_PKG_VERSION.map(|x| x.to_owned()),
         }
src/main.rs (73 changes)
@@ -21,6 +21,7 @@ use nostr_rs_relay::info::RelayInfo;
 use nostr_rs_relay::nip05;
 use nostr_rs_relay::subscription::Subscription;
 use serde::{Deserialize, Serialize};
+use serde_json::json;
 use std::collections::HashMap;
 use std::convert::Infallible;
 use std::env;
@@ -61,7 +62,7 @@ async fn handle_web_request(
     ) {
         // Request for / as websocket
         ("/", true) => {
-            debug!("websocket with upgrade request");
+            trace!("websocket with upgrade request");
            //assume request is a handshake, so create the handshake response
            let response = match handshake::server::create_response_with_body(&request, || {
                Body::empty()
@@ -355,6 +356,11 @@ fn convert_to_msg(msg: String) -> Result<NostrMessage> {
     }
 }
 
+/// Turn a string into a NOTICE message ready to send over a WebSocket
+fn make_notice_message(msg: &str) -> Message {
+    Message::text(json!(["NOTICE", msg]).to_string())
+}
+
 /// Handle new client connections.  This runs through an event loop
 /// for all client communication.
 async fn nostr_server(
@@ -375,11 +381,7 @@ async fn nostr_server(
     // Create channel for receiving NOTICEs
     let (notice_tx, mut notice_rx) = mpsc::channel::<String>(32);
 
-    // maintain a hashmap of a oneshot channel for active subscriptions.
-    // when these subscriptions are cancelled, make a message
-    // available to the executing query so it knows to stop.
-
-    // last time this client sent data
+    // last time this client sent data (message, ping, etc.)
     let mut last_message_time = Instant::now();
 
     // ping interval (every 5 minutes)
@@ -391,7 +393,11 @@ async fn nostr_server(
     let start = tokio::time::Instant::now() + default_ping_dur;
     let mut ping_interval = tokio::time::interval_at(start, default_ping_dur);
 
+    // maintain a hashmap of a oneshot channel for active subscriptions.
+    // when these subscriptions are cancelled, make a message
+    // available to the executing query so it knows to stop.
     let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
+
     // for stats, keep track of how many events the client published,
     // and how many it received from queries.
     let mut client_published_event_count: usize = 0;
@@ -414,16 +420,20 @@ async fn nostr_server(
                 ws_stream.send(Message::Ping(Vec::new())).await.ok();
             },
             Some(notice_msg) = notice_rx.recv() => {
-                let n = notice_msg.to_string().replace("\"", "");
-                ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", n))).await.ok();
+                ws_stream.send(make_notice_message(&notice_msg)).await.ok();
             },
             Some(query_result) = query_rx.recv() => {
                 // database informed us of a query result we asked for
-                client_received_event_count += 1;
-                // send a result
                 let subesc = query_result.sub_id.replace("\"", "");
-                let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
-                ws_stream.send(Message::Text(send_str)).await.ok();
+                if query_result.event == "EOSE" {
+                    let send_str = format!("[\"EOSE\",\"{}\"]", subesc);
+                    ws_stream.send(Message::Text(send_str)).await.ok();
+                } else {
+                    client_received_event_count += 1;
+                    // send a result
+                    let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
+                    ws_stream.send(Message::Text(send_str)).await.ok();
+                }
             },
             // TODO: consider logging the LaggedRecv error
             Ok(global_event) = bcast_rx.recv() => {
@@ -455,20 +465,32 @@ async fn nostr_server(
                        convert_to_msg(m)
                    },
                    Some(Ok(Message::Binary(_))) => {
-                        ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "binary messages are not accepted"))).await.ok();
+                        ws_stream.send(make_notice_message("binary messages are not accepted")).await.ok();
                        continue;
                    },
                    Some(Ok(Message::Ping(_))) | Some(Ok(Message::Pong(_))) => {
-                        // get a ping/pong, ignore
+                        // get a ping/pong, ignore.  tungstenite will
+                        // send responses automatically.
                        continue;
                    },
-                    None | Some(Ok(Message::Close(_))) | Some(Err(WsError::AlreadyClosed)) | Some(Err(WsError::ConnectionClosed)) => {
-                        debug!("normal websocket close from client: {:?}",cid);
+                    None |
+                    Some(Ok(Message::Close(_))) |
+                    Some(Err(WsError::AlreadyClosed)) |
+                    Some(Err(WsError::ConnectionClosed)) |
+                    Some(Err(WsError::Protocol(tungstenite::error::ProtocolError::ResetWithoutClosingHandshake)))
+                        => {
+                        debug!("websocket close from client: {:?}",cid);
                        break;
                    },
+                    Some(Err(WsError::Io(e))) => {
+                        // IO errors are considered fatal
+                        warn!("IO error (client: {:?}): {:?}", cid, e);
+                        break;
+                    }
                    x => {
-                        info!("message was: {:?} (ignoring)", x);
-                        continue;
+                        // default condition on error is to close the client connection
+                        info!("unknown error (client: {:?}): {:?} (closing conn)", cid, x);
+                        break;
                    }
                };
@@ -489,7 +511,7 @@ async fn nostr_server(
                            },
                            Err(_) => {
                                info!("client {:?} sent an invalid event", cid);
-                                ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "event was invalid"))).await.ok();
+                                ws_stream.send(make_notice_message("event was invalid")).await.ok();
                            }
                        }
                    },
@@ -507,14 +529,11 @@ async fn nostr_server(
                            previous_query.send(()).ok();
                        }
                        // start a database query
-                        // show pool stats
-                        debug!("DB pool stats: {:?}", pool.state());
-                        db::db_query(s, pool.get().expect("could not get connection"), query_tx.clone(), abandon_query_rx).await;
+                        db::db_query(s, pool.clone(), query_tx.clone(), abandon_query_rx).await;
                    },
                    Err(e) => {
                        info!("Subscription error: {}", e);
-                        let s = e.to_string().replace("\"", "");
-                        ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", s))).await.ok();
+                        ws_stream.send(make_notice_message(&e.to_string())).await.ok();
                    }
                }
            },
@@ -535,7 +554,7 @@ async fn nostr_server(
            },
            Err(_) => {
                info!("invalid command ignored");
-                ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "could not parse command"))).await.ok();
+                ws_stream.send(make_notice_message("could not parse command")).await.ok();
            }
        }
    },
@@ -545,11 +564,11 @@ async fn nostr_server(
            }
            Err(Error::EventMaxLengthError(s)) => {
                info!("client {:?} sent event larger ({} bytes) than max size", cid, s);
-                ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "event exceeded max size"))).await.ok();
+                ws_stream.send(make_notice_message("event exceeded max size")).await.ok();
            },
            Err(Error::ProtoParseError) => {
                info!("client {:?} sent event that could not be parsed", cid);
-                ws_stream.send(Message::Text(format!("[\"NOTICE\",\"{}\"]", "could not parse command"))).await.ok();
+                ws_stream.send(make_notice_message("could not parse command")).await.ok();
            },
            Err(e) => {
                info!("got non-fatal error from client: {:?}, error: {:?}", cid, e);
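The new make_notice_message helper exists because the hand-built format! strings it replaces stripped quotes out of messages rather than escaping them; serde_json handles the escaping. A standalone check of the framing (a sketch, not relay code):

use serde_json::json;

fn main() {
    // quotes inside the message are escaped correctly by serde_json
    let frame = json!(["NOTICE", r#"event "x" was invalid"#]).to_string();
    assert_eq!(frame, r#"["NOTICE","event \"x\" was invalid"]"#);
}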
src/subscription.rs
@@ -31,6 +31,8 @@ pub struct ReqFilter {
     pub until: Option<u64>,
     /// List of author public keys
     pub authors: Option<Vec<String>>,
+    /// Limit number of results
+    pub limit: Option<u64>,
     /// Set of tags
     #[serde(skip)]
     pub tags: Option<HashMap<String, HashSet<String>>>,
@@ -54,6 +56,7 @@ impl<'de> Deserialize<'de> for ReqFilter {
             since: None,
             until: None,
             authors: None,
+            limit: None,
             tags: None,
         };
         let mut ts = None;
@@ -68,6 +71,8 @@ impl<'de> Deserialize<'de> for ReqFilter {
                 rf.since = Deserialize::deserialize(val).ok();
             } else if key == "until" {
                 rf.until = Deserialize::deserialize(val).ok();
+            } else if key == "limit" {
+                rf.limit = Deserialize::deserialize(val).ok();
             } else if key == "authors" {
                 rf.authors = Deserialize::deserialize(val).ok();
             } else if key.starts_with('#') && key.len() > 1 && val.is_array() {
@@ -214,6 +219,7 @@ impl ReqFilter {
         // self.id.as_ref().map(|v| v == &event.id).unwrap_or(true)
         self.ids_match(event)
             && self.since.map(|t| event.created_at > t).unwrap_or(true)
+            && self.until.map(|t| event.created_at < t).unwrap_or(true)
             && self.kind_match(event.kind)
             && self.authors_match(event)
             && self.tag_match(event)
@@ -321,6 +327,50 @@ mod tests {
         Ok(())
     }
 
+    #[test]
+    fn interest_until() -> Result<()> {
+        // subscription with a filter for ID and time
+        let s: Subscription =
+            serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "until": 1000}]"#)?;
+        let e = Event {
+            id: "abc".to_owned(),
+            pubkey: "".to_owned(),
+            created_at: 50,
+            kind: 0,
+            tags: Vec::new(),
+            content: "".to_owned(),
+            sig: "".to_owned(),
+            tagidx: None,
+        };
+        assert!(s.interested_in_event(&e));
+        Ok(())
+    }
+
+    #[test]
+    fn interest_range() -> Result<()> {
+        // subscription with a filter for ID and time
+        let s_in: Subscription =
+            serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 100, "until": 200}]"#)?;
+        let s_before: Subscription =
+            serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 100, "until": 140}]"#)?;
+        let s_after: Subscription =
+            serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 160, "until": 200}]"#)?;
+        let e = Event {
+            id: "abc".to_owned(),
+            pubkey: "".to_owned(),
+            created_at: 150,
+            kind: 0,
+            tags: Vec::new(),
+            content: "".to_owned(),
+            sig: "".to_owned(),
+            tagidx: None,
+        };
+        assert!(s_in.interested_in_event(&e));
+        assert!(!s_before.interested_in_event(&e));
+        assert!(!s_after.interested_in_event(&e));
+        Ok(())
+    }
+
     #[test]
     fn interest_time_and_id() -> Result<()> {
         // subscription with a filter for ID and time
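A quick example exercising the new limit field (a sketch; filter values are illustrative):

use nostr_rs_relay::subscription::Subscription;

fn main() -> Result<(), serde_json::Error> {
    // "limit" is now parsed into ReqFilter::limit and caps the number of
    // stored events returned for this filter (most recent first)
    let s: Subscription =
        serde_json::from_str(r#"["REQ","sub1",{"kinds": [1], "limit": 20}]"#)?;
    assert_eq!(s.get_id(), "sub1");
    Ok(())
}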