2021-12-11 22:43:41 -05:00
|
|
|
//! Server process
|
2021-12-05 21:28:02 -05:00
|
|
|
use futures::SinkExt;
|
2021-12-05 17:53:26 -05:00
|
|
|
use futures::StreamExt;
|
|
|
|
use log::*;
|
2021-12-05 19:14:14 -05:00
|
|
|
use nostr_rs_relay::close::Close;
|
2021-12-05 17:53:26 -05:00
|
|
|
use nostr_rs_relay::conn;
|
2021-12-11 16:48:59 -05:00
|
|
|
use nostr_rs_relay::db;
|
2021-12-05 17:53:26 -05:00
|
|
|
use nostr_rs_relay::error::{Error, Result};
|
|
|
|
use nostr_rs_relay::event::Event;
|
|
|
|
use nostr_rs_relay::protostream;
|
|
|
|
use nostr_rs_relay::protostream::NostrMessage::*;
|
2021-12-05 21:28:02 -05:00
|
|
|
use nostr_rs_relay::protostream::NostrResponse::*;
|
2021-12-11 16:48:59 -05:00
|
|
|
use std::collections::HashMap;
|
2021-12-05 17:53:26 -05:00
|
|
|
use std::env;
|
|
|
|
use tokio::net::{TcpListener, TcpStream};
|
|
|
|
use tokio::runtime::Builder;
|
|
|
|
use tokio::sync::broadcast;
|
2021-12-11 16:48:59 -05:00
|
|
|
use tokio::sync::broadcast::{Receiver, Sender};
|
2021-12-05 17:53:26 -05:00
|
|
|
use tokio::sync::mpsc;
|
2021-12-11 16:48:59 -05:00
|
|
|
use tokio::sync::oneshot;
|
2021-12-05 17:53:26 -05:00
|
|
|
|
|
|
|
/// Start running a Nostr relay server.
|
|
|
|
fn main() -> Result<(), Error> {
|
2021-12-11 22:43:41 -05:00
|
|
|
// setup logger and environment
|
2021-12-05 17:53:26 -05:00
|
|
|
let _ = env_logger::try_init();
|
|
|
|
let addr = env::args()
|
|
|
|
.nth(1)
|
2021-12-11 17:57:55 -05:00
|
|
|
.unwrap_or_else(|| "0.0.0.0:8080".to_string());
|
2021-12-05 17:53:26 -05:00
|
|
|
// configure tokio runtime
|
|
|
|
let rt = Builder::new_multi_thread()
|
|
|
|
.enable_all()
|
|
|
|
.thread_name("tokio-ws")
|
|
|
|
.build()
|
|
|
|
.unwrap();
|
|
|
|
// start tokio
|
|
|
|
rt.block_on(async {
|
|
|
|
let listener = TcpListener::bind(&addr).await.expect("Failed to bind");
|
2021-12-12 11:03:28 -05:00
|
|
|
info!("listening on: {}", addr);
|
2021-12-11 22:43:41 -05:00
|
|
|
// all client-submitted valid events are broadcast to every
|
|
|
|
// other client on this channel. This should be large enough
|
|
|
|
// to accomodate slower readers (messages are dropped if
|
|
|
|
// clients can not keep up).
|
2021-12-11 16:48:59 -05:00
|
|
|
let (bcast_tx, _) = broadcast::channel::<Event>(4096);
|
2021-12-11 22:43:41 -05:00
|
|
|
// validated events that need to be persisted are sent to the
|
|
|
|
// database on via this channel.
|
2021-12-11 16:48:59 -05:00
|
|
|
let (event_tx, event_rx) = mpsc::channel::<Event>(16);
|
2021-12-11 22:43:41 -05:00
|
|
|
// start the database writer thread.
|
2021-12-11 16:48:59 -05:00
|
|
|
db::db_writer(event_rx).await;
|
2021-12-11 22:43:41 -05:00
|
|
|
// establish a channel for letting all threads now about a
|
|
|
|
// requested server shutdown.
|
2021-12-11 16:48:59 -05:00
|
|
|
let (invoke_shutdown, _) = broadcast::channel::<()>(1);
|
2021-12-11 22:43:41 -05:00
|
|
|
let ctrl_c_shutdown = invoke_shutdown.clone();
|
2021-12-11 16:48:59 -05:00
|
|
|
// listen for ctrl-c interruupts
|
|
|
|
tokio::spawn(async move {
|
|
|
|
tokio::signal::ctrl_c().await.unwrap();
|
2021-12-12 11:03:28 -05:00
|
|
|
info!("shutting down due to SIGINT");
|
2021-12-11 22:43:41 -05:00
|
|
|
ctrl_c_shutdown.send(()).ok();
|
2021-12-11 16:48:59 -05:00
|
|
|
});
|
|
|
|
let mut stop_listening = invoke_shutdown.subscribe();
|
2021-12-11 22:43:41 -05:00
|
|
|
// handle new client connection requests, or SIGINT signals.
|
2021-12-11 16:48:59 -05:00
|
|
|
loop {
|
|
|
|
tokio::select! {
|
|
|
|
_ = stop_listening.recv() => {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
Ok((stream, _)) = listener.accept() => {
|
|
|
|
tokio::spawn(nostr_server(
|
|
|
|
stream,
|
|
|
|
bcast_tx.clone(),
|
|
|
|
event_tx.clone(),
|
|
|
|
invoke_shutdown.subscribe(),
|
|
|
|
));
|
|
|
|
}
|
|
|
|
}
|
2021-12-05 17:53:26 -05:00
|
|
|
}
|
|
|
|
});
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2021-12-11 22:43:41 -05:00
|
|
|
/// Handle new client connections. This runs through an event loop
/// for all client communication.
///
/// * `stream` - accepted TCP connection, upgraded here to a WebSocket.
/// * `broadcast` - channel carrying every validated event to all clients.
/// * `event_tx` - channel to the database writer for persisting events.
/// * `shutdown` - receiver signalled when the server is shutting down.
async fn nostr_server(
    stream: TcpStream,
    broadcast: Sender<Event>,
    event_tx: tokio::sync::mpsc::Sender<Event>,
    mut shutdown: Receiver<()>,
) {
    // get a broadcast channel for clients to communicate on
    let mut bcast_rx = broadcast.subscribe();
    // upgrade the TCP connection to WebSocket
    let conn = tokio_tungstenite::accept_async(stream).await;
    // NOTE(review): a failed handshake panics this task (not the whole
    // server, since each client runs in its own spawned task).
    let ws_stream = conn.expect("websocket handshake error");
    // wrap websocket into a stream & sink of Nostr protocol messages
    let mut nostr_stream = protostream::wrap_ws_in_nostr(ws_stream);
    // Track internal client state (subscriptions, identity prefix).
    let mut conn = conn::ClientConn::new();
    // short client identifier used in all log lines below.
    let cid = conn.get_client_prefix();
    // Create a channel for receiving query results from the database.
    // we will send out the tx handle to any query we generate.
    let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(256);
    // maintain a hashmap of oneshot channels, one per active
    // subscription.  when a subscription is cancelled, a message is
    // made available to the executing query so it knows to stop.
    //let (abandon_query_tx, _) = oneshot::channel::<()>();
    let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
    // for stats, keep track of how many events the client published,
    // and how many it received from queries.
    let mut client_published_event_count: usize = 0;
    let mut client_received_event_count: usize = 0;
    info!("new connection for client: {}", cid);
    loop {
        tokio::select! {
            _ = shutdown.recv() => {
                // server shutting down, exit loop
                break;
            },
            Some(query_result) = query_rx.recv() => {
                // database informed us of a query result we asked for
                let res = EventRes(query_result.sub_id,query_result.event);
                client_received_event_count += 1;
                // send errors are ignored; a broken sink is detected on
                // the next stream read instead.
                nostr_stream.send(res).await.ok();
            },
            Ok(global_event) = bcast_rx.recv() => {
                // an event has been broadcast to all clients
                // first check if there is a subscription for this event.
                let sub_name_opt = conn.get_matching_subscription(&global_event);
                if let Some(sub_name) = sub_name_opt {
                    // TODO: serialize at broadcast time, instead of
                    // once for each consumer.
                    if let Ok(event_str) = serde_json::to_string(&global_event) {
                        debug!("sub match: client: {}, sub: {}, event: {}",
                               cid, sub_name,
                               global_event.get_event_id_prefix());
                        // create an event response and send it
                        let res = EventRes(sub_name.to_owned(),event_str);
                        nostr_stream.send(res).await.ok();
                    } else {
                        warn!("could not convert event to string");
                    }
                }
            },
            // next protocol message from this client (event publish,
            // subscribe, close, or a connection-level error).
            proto_next = nostr_stream.next() => {
                match proto_next {
                    Some(Ok(EventMsg(ec))) => {
                        // An EventCmd needs to be validated to be converted into an Event
                        let parsed : Result<Event> = Result::<Event>::from(ec);
                        match parsed {
                            Ok(e) => {
                                let id_prefix:String = e.id.chars().take(8).collect();
                                debug!("successfully parsed/validated event: {} from client: {}", id_prefix, cid);
                                // Write this to the database
                                event_tx.send(e.clone()).await.ok();
                                client_published_event_count += 1;
                                // send this event to everyone listening.
                                let bcast_res = broadcast.send(e);
                                if bcast_res.is_err() {
                                    // fails when no client is subscribed; worth
                                    // logging but not fatal for this connection.
                                    warn!("could not send broadcast message: {:?}", bcast_res);
                                }
                            },
                            Err(_) => {info!("client {} sent an invalid event", cid)}
                        }
                    },
                    Some(Ok(SubMsg(s))) => {
                        debug!("client {} requesting a subscription", cid);
                        // subscription handling consists of:
                        // * registering the subscription so future events can be matched
                        // * making a channel to cancel the request later
                        // * sending a request for a SQL query
                        let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
                        running_queries.insert(s.id.to_owned(), abandon_query_tx);
                        // register this connection
                        // NOTE(review): a subscribe() error is silently ignored
                        // here while the DB query below still runs — confirm
                        // this is intended.
                        conn.subscribe(s.clone()).ok();
                        // start a database query
                        db::db_query(s, query_tx.clone(), abandon_query_rx).await;
                    },
                    Some(Ok(CloseMsg(cc))) => {
                        // closing a request simply removes the subscription.
                        let parsed : Result<Close> = Result::<Close>::from(cc);
                        match parsed {
                            Ok(c) => {
                                // check if a query is currently
                                // running, and remove it if so.
                                let stop_tx = running_queries.remove(&c.id);
                                if let Some(tx) = stop_tx {
                                    // tell the in-flight DB query to stop.
                                    tx.send(()).ok();
                                }
                                // stop checking new events against
                                // the subscription
                                conn.unsubscribe(c);
                            },
                            Err(_) => {info!("invalid command ignored");}
                        }
                    },
                    None => {
                        // stream exhausted: the client closed cleanly.
                        debug!("normal websocket close from client: {}",cid);
                        break;
                    },
                    Some(Err(Error::ConnError)) => {
                        // transport-level failure: drop the connection.
                        debug!("got connection close/error, disconnecting client: {}",cid);
                        break;
                    }
                    Some(Err(e)) => {
                        // any other error (e.g. a malformed message) is
                        // non-fatal; keep serving this client.
                        info!("got non-fatal error from client: {}, error: {:?}", cid, e);
                    },
                }
            },
        }
    }
    // connection cleanup - ensure any still running queries are terminated.
    for (_, stop_tx) in running_queries.into_iter() {
        stop_tx.send(()).ok();
    }
    info!(
        "stopping connection for client: {} (client sent {} event(s), received {})",
        cid, client_published_event_count, client_received_event_count
    );
}
|