From ca0f01c94b9ce23e746a08da3ad29a4f473ddcd9 Mon Sep 17 00:00:00 2001 From: Greg Heartsfield Date: Sat, 11 Dec 2021 21:43:41 -0600 Subject: [PATCH] docs: add rustdoc comments --- src/close.rs | 12 +++---- src/conn.rs | 27 ++++++++++------ src/db.rs | 79 +++++++++++++++++++++++++++++---------------- src/error.rs | 8 +++-- src/event.rs | 23 ++++++++----- src/main.rs | 36 ++++++++++----------- src/protostream.rs | 35 +++++++++++--------- src/subscription.rs | 37 +++++++++++++-------- 8 files changed, 157 insertions(+), 100 deletions(-) diff --git a/src/close.rs b/src/close.rs index 5392622..c9f914e 100644 --- a/src/close.rs +++ b/src/close.rs @@ -1,14 +1,20 @@ +//! Subscription close request parsing use crate::error::{Error, Result}; use serde::{Deserialize, Serialize}; +/// Close command in network format #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] pub struct CloseCmd { + /// Protocol command, expected to always be "CLOSE". cmd: String, + /// The subscription identifier being closed. id: String, } +/// Close command parsed #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] pub struct Close { + /// The subscription identifier being closed. pub id: String, } @@ -22,9 +28,3 @@ impl From for Result { } } } - -impl Close { - pub fn get_id(&self) -> String { - self.id.clone() - } -} diff --git a/src/conn.rs b/src/conn.rs index 67c3276..9151e26 100644 --- a/src/conn.rs +++ b/src/conn.rs @@ -1,3 +1,4 @@ +//! Client connection state use crate::close::Close; use crate::error::Result; use crate::event::Event; @@ -6,34 +7,38 @@ use log::*; use std::collections::HashMap; use uuid::Uuid; -// subscription identifiers must be reasonably sized. +/// A subscription identifier has a maximum length const MAX_SUBSCRIPTION_ID_LEN: usize = 256; -// state for a client connection +/// State for a client connection pub struct ClientConn { + /// Unique client identifier generated at connection time client_id: Uuid, - // current set of subscriptions + /// The current set of active client subscriptions subscriptions: HashMap, - // websocket - //stream: WebSocketStream, + /// Per-connection maximum concurrent subscriptions max_subs: usize, } impl ClientConn { + /// Create a new, empty connection state. pub fn new() -> Self { let client_id = Uuid::new_v4(); ClientConn { - client_id: client_id, + client_id, subscriptions: HashMap::new(), max_subs: 128, } } + /// Get a short prefix of the client's unique identifier, suitable + /// for logging. pub fn get_client_prefix(&self) -> String { self.client_id.to_string().chars().take(8).collect() } - // return the first subscription that matches the event. + /// Find the first subscription identifier that matches the event, + /// if any do. pub fn get_matching_subscription(&self, e: &Event) -> Option<&str> { for (id, sub) in self.subscriptions.iter() { if sub.interested_in_event(e) { @@ -43,9 +48,12 @@ impl ClientConn { None } + /// Add a new subscription for this connection. pub fn subscribe(&mut self, s: Subscription) -> Result<()> { let k = s.get_id(); let sub_id_len = k.len(); + // prevent arbitrarily long subscription identifiers from + // being used. 
if sub_id_len > MAX_SUBSCRIPTION_ID_LEN { info!("Dropping subscription with huge ({}) length", sub_id_len); return Ok(()); @@ -54,7 +62,7 @@ impl ClientConn { if self.subscriptions.contains_key(&k) { self.subscriptions.remove(&k); self.subscriptions.insert(k, s); - info!("Replaced existing subscription"); + debug!("Replaced existing subscription"); return Ok(()); } @@ -72,9 +80,10 @@ impl ClientConn { return Ok(()); } + /// Remove the subscription for this connection. pub fn unsubscribe(&mut self, c: Close) { // TODO: return notice if subscription did not exist. - self.subscriptions.remove(&c.get_id()); + self.subscriptions.remove(&c.id); info!( "Removed subscription, currently have {} active subs", self.subscriptions.len() diff --git a/src/db.rs b/src/db.rs index e9f73fb..6874570 100644 --- a/src/db.rs +++ b/src/db.rs @@ -1,3 +1,4 @@ +//! Event persistence and querying use crate::error::Result; use crate::event::Event; use crate::subscription::Subscription; @@ -9,10 +10,12 @@ use rusqlite::OpenFlags; use std::path::Path; use tokio::task; +/// Database file const DB_FILE: &str = "nostr.db"; -// schema +/// Schema definition const INIT_SQL: &str = r##" +-- Database settings PRAGMA encoding = "UTF-8"; PRAGMA journal_mode=WAL; PRAGMA main.synchronous=NORMAL; @@ -20,6 +23,8 @@ PRAGMA foreign_keys = ON; PRAGMA application_id = 1654008667; PRAGMA user_version = 1; pragma mmap_size = 536870912; -- 512MB of mmap + +-- Event Table CREATE TABLE IF NOT EXISTS event ( id INTEGER PRIMARY KEY, event_hash BLOB NOT NULL, -- 4-byte hash @@ -29,23 +34,33 @@ author BLOB NOT NULL, -- author pubkey kind INTEGER NOT NULL, -- event kind content TEXT NOT NULL -- serialized json of event object ); + +-- Event Indexes CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash); CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at); CREATE INDEX IF NOT EXISTS author_index ON event(author); CREATE INDEX IF NOT EXISTS kind_index ON event(kind); + +-- Event References Table CREATE TABLE IF NOT EXISTS event_ref ( id INTEGER PRIMARY KEY, event_id INTEGER NOT NULL, -- an event ID that contains an #e tag. referenced_event BLOB NOT NULL, -- the event that is referenced. FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE ); + +-- Event References Index CREATE INDEX IF NOT EXISTS event_ref_index ON event_ref(referenced_event); + +-- Pubkey References Table CREATE TABLE IF NOT EXISTS pubkey_ref ( id INTEGER PRIMARY KEY, event_id INTEGER NOT NULL, -- an event ID that contains an #p tag. referenced_pubkey BLOB NOT NULL, -- the pubkey that is referenced. FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE RESTRICT ON DELETE CASCADE ); + +-- Pubkey References Index CREATE INDEX IF NOT EXISTS pubkey_ref_index ON pubkey_ref(referenced_pubkey); "##; @@ -70,7 +85,6 @@ pub async fn db_writer( let next_event = event_rx.blocking_recv(); // if the channel has closed, we will never get work if next_event.is_none() { - info!("No more event senders for DB, shutting down."); break; } let event = next_event.unwrap(); @@ -84,7 +98,7 @@ pub async fn db_writer( } } Err(err) => { - info!("event insert failed: {}", err); + warn!("event insert failed: {}", err); } } } @@ -94,6 +108,7 @@ pub async fn db_writer( }) } +/// Persist an event to the database. 
pub fn write_event(conn: &mut Connection, e: &Event) -> Result<usize> { // start transaction let tx = conn.transaction()?; @@ -101,15 +116,21 @@ pub fn write_event(conn: &mut Connection, e: &Event) -> Result<usize> { let id_blob = hex::decode(&e.id).ok(); let pubkey_blob = hex::decode(&e.pubkey).ok(); let event_str = serde_json::to_string(&e).ok(); - // ignore if the event hash is a duplicate.x + // ignore if the event hash is a duplicate. let ins_count = tx.execute( "INSERT OR IGNORE INTO event (event_hash, created_at, kind, author, content, first_seen) VALUES (?1, ?2, ?3, ?4, ?5, strftime('%s','now'));", params![id_blob, e.created_at, e.kind, pubkey_blob, event_str] )?; + if ins_count == 0 { + // if the event was a duplicate, no need to insert event or + // pubkey references. + return Ok(ins_count); + } + // remember primary key of the event most recently inserted. let ev_id = tx.last_insert_rowid(); + // add all event tags into the event_ref table let etags = e.get_event_tags(); if etags.len() > 0 { - // this will need to for etag in etags.iter() { tx.execute( "INSERT OR IGNORE INTO event_ref (event_id, referenced_event) VALUES (?1, ?2)", @@ -117,6 +138,7 @@ pub fn write_event(conn: &mut Connection, e: &Event) -> Result<usize> { )?; } } + // add all pubkey tags into the pubkey_ref table let ptags = e.get_pubkey_tags(); if ptags.len() > 0 { for ptag in ptags.iter() { @@ -130,18 +152,21 @@ pub fn write_event(conn: &mut Connection, e: &Event) -> Result<usize> { Ok(ins_count) } -// Queries return a subscription identifier and the serialized event. +/// Event resulting from a specific subscription request #[derive(PartialEq, Debug, Clone)] pub struct QueryResult { + /// Subscription identifier pub sub_id: String, + /// Serialized event pub event: String, } -// TODO: make this hex -fn is_alphanum(s: &str) -> bool { +/// Check if a string contains only hex characters. +fn is_hex(s: &str) -> bool { s.chars().all(|x| char::is_ascii_hexdigit(&x)) } +/// Create a dynamic SQL query string from a subscription. fn query_from_sub(sub: &Subscription) -> String { // build a dynamic SQL query. all user-input is either an integer // (sqli-safe), or a string that is filtered to only contain @@ -150,7 +175,6 @@ fn query_from_sub(sub: &Subscription) -> String { "SELECT DISTINCT(e.content) FROM event e LEFT JOIN event_ref er ON e.id=er.event_id LEFT JOIN pubkey_ref pr ON e.id=pr.event_id " .to_owned(); // for every filter in the subscription, generate a where clause - // all individual filter clause strings for this subscription let mut filter_clauses: Vec<String> = Vec::new(); for f in sub.filters.iter() { // individual filter components @@ -160,7 +184,7 @@ fn query_from_sub(sub: &Subscription) -> String { // I believe the author & authors fields are redundant.
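// Illustrative sketch (not part of this patch): assuming the per-filter
// components built below are joined with " AND " (that joining code is
// outside this hunk), a single filter such as {"author": "abc123", "since": 1638000000}
// would yield roughly:
//   SELECT DISTINCT(e.content) FROM event e
//     LEFT JOIN event_ref er ON e.id=er.event_id
//     LEFT JOIN pubkey_ref pr ON e.id=pr.event_id
//   WHERE (author = x'abc123' AND created_at > 1638000000)
// with one parenthesized group per filter, OR'd together as shown further below.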
if f.author.is_some() { let author_str = f.author.as_ref().unwrap(); - if is_alphanum(author_str) { + if is_hex(author_str) { let author_clause = format!("author = x'{}'", author_str); filter_components.push(author_clause); } @@ -172,7 +196,7 @@ fn query_from_sub(sub: &Subscription) -> String { .as_ref() .unwrap() .iter() - .filter(|&x| is_alphanum(x)) + .filter(|&x| is_hex(x)) .map(|x| format!("x'{}'", x)) .collect(); let authors_clause = format!("author IN ({})", authors_escaped.join(", ")); @@ -186,34 +210,30 @@ fn query_from_sub(sub: &Subscription) -> String { } // Query for event if f.id.is_some() { - // whitelist characters let id_str = f.id.as_ref().unwrap(); - if is_alphanum(id_str) { + if is_hex(id_str) { let id_clause = format!("event_hash = x'{}'", id_str); filter_components.push(id_clause); } } // Query for referenced event if f.event.is_some() { - // whitelist characters let ev_str = f.event.as_ref().unwrap(); - if is_alphanum(ev_str) { + if is_hex(ev_str) { let ev_clause = format!("referenced_event = x'{}'", ev_str); filter_components.push(ev_clause); } } // Query for referenced pet name pubkey if f.pubkey.is_some() { - // whitelist characters let pet_str = f.pubkey.as_ref().unwrap(); - if is_alphanum(pet_str) { + if is_hex(pet_str) { let pet_clause = format!("referenced_pubkey = x'{}'", pet_str); filter_components.push(pet_clause); } } // Query for timestamp if f.since.is_some() { - // timestamp is number, no escaping needed let created_clause = format!("created_at > {}", f.since.unwrap()); filter_components.push(created_clause); } @@ -231,10 +251,16 @@ fn query_from_sub(sub: &Subscription) -> String { query.push_str(" WHERE "); query.push_str(&filter_clauses.join(" OR ")); } - info!("Query: {}", query); - return query; + debug!("Query: {}", query); + query } +/// Perform a database query using a subscription. +/// +/// The [`Subscription`] is converted into a SQL query. Each result +/// is published on the `query_tx` channel as it is returned. If a +/// message becomes available on the `abandon_query_rx` channel, the +/// query is immediately aborted. pub async fn db_query( sub: Subscription, query_tx: tokio::sync::mpsc::Sender, @@ -246,22 +272,19 @@ pub async fn db_query( .unwrap(); info!("Opened database for reading"); info!("Going to query for: {:?}", sub); - // generate query + // generate SQL query let q = query_from_sub(&sub); - + // execute the query let mut stmt = conn.prepare(&q).unwrap(); let mut event_rows = stmt.query([]).unwrap(); - let mut i: usize = 0; while let Some(row) = event_rows.next().unwrap() { // check if this is still active (we could do this every N rows) if abandon_query_rx.try_recv().is_ok() { - info!("Abandoning query..."); - // we have received a request to abandon the query + debug!("query aborted"); return; } + // TODO: check before unwrapping let event_json = row.get(0).unwrap(); - i += 1; - info!("Sending event #{}", i); query_tx .blocking_send(QueryResult { sub_id: sub.get_id(), @@ -269,6 +292,6 @@ pub async fn db_query( }) .ok(); } - info!("Finished reading"); + debug!("query completed"); }); } diff --git a/src/error.rs b/src/error.rs index 5abe1f1..2b8a778 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,11 +1,12 @@ -//! Error handling. - +//! 
Error handling use std::result; use thiserror::Error; use tungstenite::error::Error as WsError; +/// Simple `Result` type for errors in this module pub type Result = result::Result; +/// Custom error type for Nostr #[derive(Error, Debug)] pub enum Error { #[error("Protocol parse error")] @@ -32,18 +33,21 @@ pub enum Error { } impl From for Error { + /// Wrap SQL error fn from(r: rusqlite::Error) -> Self { Error::SqlError(r) } } impl From for Error { + /// Wrap JSON error fn from(r: serde_json::Error) -> Self { Error::JsonParseFailed(r) } } impl From for Error { + /// Wrap Websocket error fn from(r: WsError) -> Self { Error::WebsocketError(r) } diff --git a/src/event.rs b/src/event.rs index 1ad239c..72bfd91 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1,3 +1,4 @@ +//! Event parsing and validation use crate::error::Error::*; use crate::error::Result; use bitcoin_hashes::{sha256, Hash}; @@ -8,12 +9,14 @@ use serde_json::value::Value; use serde_json::Number; use std::str::FromStr; +/// Event command in network format #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] pub struct EventCmd { cmd: String, // expecting static "EVENT" event: Event, } +/// Event parsed #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] pub struct Event { pub id: String, @@ -21,15 +24,16 @@ pub struct Event { pub(crate) created_at: u64, pub(crate) kind: u64, #[serde(deserialize_with = "tag_from_string")] - // TODO: array-of-arrays may need to be more general than a string container + // NOTE: array-of-arrays may need to be more general than a string container pub(crate) tags: Vec>, pub(crate) content: String, pub(crate) sig: String, } +/// Simple tag type for array of array of strings. type Tag = Vec>; -// handle a default value (empty vec) for null tags +/// Deserializer that ensures we always have a [`Tag`]. fn tag_from_string<'de, D>(deserializer: D) -> Result where D: Deserializer<'de>, @@ -38,6 +42,7 @@ where Ok(opt.unwrap_or_else(|| vec![])) } +/// Convert network event to parsed/validated event. impl From for Result { fn from(ec: EventCmd) -> Result { // ensure command is correct @@ -52,12 +57,12 @@ impl From for Result { } impl Event { - // get short event identifer + /// Create a short event identifier, suitable for logging. pub fn get_event_id_prefix(&self) -> String { self.id.chars().take(8).collect() } - // check if this event is valid (should be propagated, stored) based on signature. + /// Check if this event has a valid signature. fn is_valid(&self) -> bool { // validation is performed by: // * parsing JSON string into event fields @@ -89,7 +94,7 @@ impl Event { } } - // convert event to canonical representation for signing + /// Convert event to canonical representation for signing. fn to_canonical(&self) -> Option { // create a JsonValue for each event element let mut c: Vec = vec![]; @@ -110,6 +115,8 @@ impl Event { c.push(Value::String(self.content.to_owned())); serde_json::to_string(&Value::Array(c)).ok() } + + /// Convert tags to a canonical form for signing. fn tags_to_canonical(&self) -> Value { let mut tags = Vec::::new(); // iterate over self tags, @@ -124,7 +131,7 @@ impl Event { serde_json::Value::Array(tags) } - // get set of event tags + /// Get a list of event tags. pub fn get_event_tags(&self) -> Vec<&str> { let mut etags = vec![]; for t in self.tags.iter() { @@ -137,7 +144,7 @@ impl Event { etags } - // get set of pubkey tags + /// Get a list of pubkey/petname tags. 
pub fn get_pubkey_tags(&self) -> Vec<&str> { let mut ptags = vec![]; for t in self.tags.iter() { @@ -150,7 +157,7 @@ impl Event { ptags } - // check if given event is referenced in a tag + /// Check if a given event is referenced in an event tag. pub fn event_tag_match(&self, eventid: &str) -> bool { self.get_event_tags().contains(&eventid) } diff --git a/src/main.rs b/src/main.rs index d4e174b..699f88c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,3 +1,4 @@ +//! Server process use futures::SinkExt; use futures::StreamExt; use log::*; @@ -20,7 +21,7 @@ use tokio::sync::oneshot; /// Start running a Nostr relay server. fn main() -> Result<(), Error> { - // setup logger + // setup logger and environment let _ = env_logger::try_init(); let addr = env::args() .nth(1) @@ -35,33 +36,28 @@ fn main() -> Result<(), Error> { rt.block_on(async { let listener = TcpListener::bind(&addr).await.expect("Failed to bind"); info!("Listening on: {}", addr); - // Establish global broadcast channel. This is where all - // accepted events will be distributed for other connected clients. - - // this needs to be large enough to accomodate any slow - // readers - otherwise messages will be dropped before they - // can be processed. Since this is global to all connections, - // we can tolerate this being rather large (for 4096, the - // buffer itself is about 1MB). + // all client-submitted valid events are broadcast to every + // other client on this channel. This should be large enough + // to accommodate slower readers (messages are dropped if + // clients cannot keep up). let (bcast_tx, _) = broadcast::channel::<Event>(4096); - // Establish database writer channel. This needs to be - // accessible from sync code, which is why the broadcast - // cannot be reused. + // validated events that need to be persisted are sent to the + // database via this channel. let (event_tx, event_rx) = mpsc::channel::<Event>(16); - // start the database writer. + // start the database writer thread. db::db_writer(event_rx).await; - // setup a broadcast channel for invoking a process shutdown + // establish a channel for letting all threads know about a + // requested server shutdown. let (invoke_shutdown, _) = broadcast::channel::<()>(1); - let shutdown_handler = invoke_shutdown.clone(); + let ctrl_c_shutdown = invoke_shutdown.clone(); // listen for ctrl-c interruupts tokio::spawn(async move { tokio::signal::ctrl_c().await.unwrap(); - // Your handler here - info!("got ctrl-c"); - shutdown_handler.send(()).ok(); + info!("Shutting down due to SIGINT"); + ctrl_c_shutdown.send(()).ok(); }); let mut stop_listening = invoke_shutdown.subscribe(); - // shutdown on Ctrl-C, or accept a new connection + // handle new client connection requests, or SIGINT signals. loop { tokio::select! { _ = stop_listening.recv() => { @@ -81,6 +77,8 @@ fn main() -> Result<(), Error> { Ok(()) } +/// Handle new client connections. This runs through an event loop +/// for all client communication. async fn nostr_server( stream: TcpStream, broadcast: Sender<Event>, diff --git a/src/protostream.rs b/src/protostream.rs index 3b76191..faeecf2 100644 --- a/src/protostream.rs +++ b/src/protostream.rs @@ -1,3 +1,4 @@ +//! Nostr protocol layered over WebSocket use crate::close::CloseCmd; use crate::error::{Error, Result}; use crate::event::EventCmd; @@ -14,38 +15,43 @@ use tokio_tungstenite::WebSocketStream; use tungstenite::error::Error as WsError; use tungstenite::protocol::Message; -// A Nostr message is either event, subscription, or close.
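// Illustrative sketch (not part of this patch): the untagged enum below is
// deserialized straight from the raw client text, so each variant corresponds
// to one NIP-01 style JSON array (the exact validation done per variant is an
// assumption here):
//   ["EVENT", {<event object>}]          -> NostrMessage::EventMsg(EventCmd)
//   ["REQ", "<sub id>", {<filter>}, ...] -> NostrMessage::SubMsg(Subscription)
//   ["CLOSE", "<sub id>"]                -> NostrMessage::CloseMsg(CloseCmd)
// e.g. serde_json::from_str::<NostrMessage>(r#"["CLOSE", "sub-1"]"#) should
// yield NostrMessage::CloseMsg(..).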
+/// Nostr protocol messages from a client #[derive(Deserialize, Serialize, Clone, PartialEq, Debug)] #[serde(untagged)] pub enum NostrMessage { + /// An `EVENT` message EventMsg(EventCmd), + /// A `REQ` message SubMsg(Subscription), + /// A `CLOSE` message CloseMsg(CloseCmd), } -// Either an event w/ subscription, or a notice +/// Nostr protocol messages from a relay/server #[derive(Deserialize, Serialize, Clone, PartialEq, Debug)] pub enum NostrResponse { + /// A `NOTICE` response NoticeRes(String), - // A subscription identifier and serialized response + /// An `EVENT` response, composed of the subscription identifier, + /// and serialized event JSON EventRes(String, String), } -// A Nostr protocol stream is layered on top of a Websocket stream. +/// A Nostr protocol stream is layered on top of a Websocket stream. pub struct NostrStream { ws_stream: WebSocketStream, } -// given a websocket, return a protocol stream -//impl Stream> + Sink +/// Given a websocket, return a protocol stream wrapper. pub fn wrap_ws_in_nostr(ws: WebSocketStream) -> NostrStream { return NostrStream { ws_stream: ws }; } +/// Implement the [`Stream`] interface to produce Nostr messages. impl Stream for NostrStream { type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - // convert Message to NostrMessage + /// Convert Message to NostrMessage fn convert(msg: String) -> Result { let parsed_res: Result = serde_json::from_str(&msg).map_err(|e| e.into()); match parsed_res { @@ -56,22 +62,22 @@ impl Stream for NostrStream { } } } - match Pin::new(&mut self.ws_stream).poll_next(cx) { - Poll::Pending => Poll::Pending, // not ready - Poll::Ready(None) => Poll::Ready(None), // done + Poll::Pending => Poll::Pending, + Poll::Ready(None) => Poll::Ready(None), Poll::Ready(Some(v)) => match v { - Ok(Message::Text(vs)) => Poll::Ready(Some(convert(vs))), // convert message->basicmessage + Ok(Message::Text(vs)) => Poll::Ready(Some(convert(vs))), Ok(Message::Binary(_)) => Poll::Ready(Some(Err(Error::ProtoParseError))), Ok(Message::Pong(_)) | Ok(Message::Ping(_)) => Poll::Pending, Ok(Message::Close(_)) => Poll::Ready(None), - Err(WsError::AlreadyClosed) | Err(WsError::ConnectionClosed) => Poll::Ready(None), // done + Err(WsError::AlreadyClosed) | Err(WsError::ConnectionClosed) => Poll::Ready(None), Err(_) => Poll::Ready(Some(Err(Error::ConnError))), }, } } } +/// Implement the [`Sink`] interface to produce Nostr responses. impl Sink for NostrStream { type Error = Error; @@ -85,9 +91,8 @@ impl Sink for NostrStream { } fn start_send(mut self: Pin<&mut Self>, item: NostrResponse) -> Result<(), Self::Error> { - //let res_message = serde_json::to_string(&item).expect("Could convert message to string"); - // create the string to send. - // TODO: do real escaping for both of these. Currently output isn't correctly escaped. + // TODO: do real escaping for these - at least on NOTICE, + // which surely has some problems if arbitrary text is sent. let send_str = match item { NostrResponse::NoticeRes(msg) => { let s = msg.replace("\"", ""); diff --git a/src/subscription.rs b/src/subscription.rs index 22710b1..8bf480c 100644 --- a/src/subscription.rs +++ b/src/subscription.rs @@ -1,30 +1,44 @@ -use crate::error::{Error, Result}; +//! 
Subscription and filter parsing +use crate::error::Result; use crate::event::Event; use serde::{Deserialize, Deserializer, Serialize}; -//use serde_json::json; -//use serde_json::Result; +/// Subscription identifier and set of request filters #[derive(Serialize, PartialEq, Debug, Clone)] pub struct Subscription { pub id: String, pub filters: Vec<ReqFilter>, } +/// Filter for requests +/// +/// Corresponds to client-provided subscription request elements. Any +/// element can be present if it should be used in filtering, or +/// absent ([`None`]) if it should be ignored. #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] #[serde(deny_unknown_fields)] pub struct ReqFilter { + /// Event hash pub id: Option<String>, + /// Author public key pub author: Option<String>, + /// Event kind pub kind: Option<u64>, + /// Referenced event hash #[serde(rename = "#e")] pub event: Option<String>, + /// Referenced public key for a petname #[serde(rename = "#p")] pub pubkey: Option<String>, + /// Events published after this time pub since: Option<u64>, + /// List of author public keys pub authors: Option<Vec<String>>, } impl<'de> Deserialize<'de> for Subscription { + /// Custom deserializer for subscriptions, which have a more + /// complex structure than the other message types. fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, @@ -74,17 +88,13 @@ impl<'de> Deserialize<'de> for Subscription { } impl Subscription { - pub fn parse(json: &str) -> Result<Subscription> { - serde_json::from_str(json).map_err(|e| Error::JsonParseFailed(e)) - } + /// Get a copy of the subscription identifier. pub fn get_id(&self) -> String { self.id.clone() } - pub fn get_filter_count(&self) -> usize { - self.filters.len() - } + /// Determine if this subscription matches a given [`Event`]. Any + /// individual filter match is sufficient. pub fn interested_in_event(&self, event: &Event) -> bool { - // loop through every filter, and return true if any match this event. for f in self.filters.iter() { if f.interested_in_event(event) { return true; @@ -95,7 +105,7 @@ impl Subscription { } impl ReqFilter { - // attempt to match against author/authors fields + /// Check if this filter either matches, or does not care about an author. fn author_match(&self, event: &Event) -> bool { self.authors .as_ref() @@ -107,6 +117,7 @@ impl ReqFilter { .map(|v| v == &event.pubkey) .unwrap_or(true) } + /// Check if this filter either matches, or does not care about the event tags. fn event_match(&self, event: &Event) -> bool { self.event .as_ref() @@ -114,13 +125,13 @@ impl ReqFilter { .unwrap_or(true) } + /// Check if this filter either matches, or does not care about the kind. fn kind_match(&self, kind: u64) -> bool { self.kind.map(|v| v == kind).unwrap_or(true) } + /// Determine if all populated fields in this filter match the provided event. pub fn interested_in_event(&self, event: &Event) -> bool { - // determine if all populated fields in this filter match the provided event. - // a filter matches an event if all the populated fields match. self.id.as_ref().map(|v| v == &event.id).unwrap_or(true) && self.since.map(|t| event.created_at > t).unwrap_or(true) && self.kind_match(event.kind)
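// Illustrative sketch (not part of this patch): exercising the custom
// Subscription deserializer above with a NIP-01 style REQ array; the
// subscription id and filter values here are made up for the example.
//
//   let raw = r#"["REQ", "example-sub", {"kind": 1, "since": 1638000000}]"#;
//   let sub: Subscription = serde_json::from_str(raw)?;
//   assert_eq!(sub.get_id(), "example-sub");
//   assert_eq!(sub.filters.len(), 1);
//   assert_eq!(sub.filters[0].since, Some(1638000000));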