2022-09-06 06:56:04 -04:00
//! Server process
use crate ::close ::Close ;
use crate ::close ::CloseCmd ;
2022-09-11 12:01:36 -04:00
use crate ::config ::{ Settings , VerifiedUsersMode } ;
2022-09-06 06:56:04 -04:00
use crate ::conn ;
2023-01-22 10:49:49 -05:00
use crate ::repo ::NostrRepo ;
2022-09-06 06:56:04 -04:00
use crate ::db ;
use crate ::db ::SubmittedEvent ;
use crate ::error ::{ Error , Result } ;
use crate ::event ::Event ;
use crate ::event ::EventCmd ;
use crate ::info ::RelayInfo ;
use crate ::nip05 ;
2022-11-10 17:27:10 -05:00
use crate ::notice ::Notice ;
2022-09-06 06:56:04 -04:00
use crate ::subscription ::Subscription ;
use futures ::SinkExt ;
use futures ::StreamExt ;
2022-12-17 10:27:29 -05:00
use governor ::{ Jitter , Quota , RateLimiter } ;
2022-11-03 14:01:38 -04:00
use http ::header ::HeaderMap ;
2022-09-06 06:56:04 -04:00
use hyper ::header ::ACCEPT ;
use hyper ::service ::{ make_service_fn , service_fn } ;
use hyper ::upgrade ::Upgraded ;
use hyper ::{
header , server ::conn ::AddrStream , upgrade , Body , Request , Response , Server , StatusCode ,
} ;
use serde ::{ Deserialize , Serialize } ;
use serde_json ::json ;
use std ::collections ::HashMap ;
use std ::convert ::Infallible ;
use std ::net ::SocketAddr ;
use std ::path ::Path ;
2022-12-27 10:48:07 -05:00
use std ::sync ::Arc ;
2022-12-25 11:45:41 -05:00
use std ::sync ::atomic ::Ordering ;
2022-09-06 07:12:07 -04:00
use std ::sync ::mpsc ::Receiver as MpscReceiver ;
2022-09-06 06:56:04 -04:00
use std ::time ::Duration ;
use std ::time ::Instant ;
use tokio ::runtime ::Builder ;
use tokio ::sync ::broadcast ::{ self , Receiver , Sender } ;
use tokio ::sync ::mpsc ;
use tokio ::sync ::oneshot ;
use tokio_tungstenite ::WebSocketStream ;
2023-01-22 10:49:49 -05:00
use tracing ::{ debug , error , info , trace , warn } ;
2022-09-06 06:56:04 -04:00
use tungstenite ::error ::CapacityError ::MessageTooLong ;
use tungstenite ::error ::Error as WsError ;
use tungstenite ::handshake ;
use tungstenite ::protocol ::Message ;
use tungstenite ::protocol ::WebSocketConfig ;
2023-01-22 10:49:49 -05:00
/// Handle arbitrary HTTP requests, including for `WebSocket` upgrades.
2022-09-06 06:56:04 -04:00
async fn handle_web_request (
mut request : Request < Body > ,
2023-01-22 10:49:49 -05:00
repo : Arc < dyn NostrRepo > ,
2022-09-06 07:12:07 -04:00
settings : Settings ,
2022-09-06 06:56:04 -04:00
remote_addr : SocketAddr ,
broadcast : Sender < Event > ,
event_tx : tokio ::sync ::mpsc ::Sender < SubmittedEvent > ,
shutdown : Receiver < ( ) > ,
) -> Result < Response < Body > , Infallible > {
match (
request . uri ( ) . path ( ) ,
request . headers ( ) . contains_key ( header ::UPGRADE ) ,
) {
// Request for / as websocket
( " / " , true ) = > {
trace! ( " websocket with upgrade request " ) ;
//assume request is a handshake, so create the handshake response
let response = match handshake ::server ::create_response_with_body ( & request , | | {
Body ::empty ( )
} ) {
Ok ( response ) = > {
//in case the handshake response creation succeeds,
//spawn a task to handle the websocket connection
tokio ::spawn ( async move {
//using the hyper feature of upgrading a connection
match upgrade ::on ( & mut request ) . await {
//if successfully upgraded
Ok ( upgraded ) = > {
// set WebSocket configuration options
2022-09-06 07:12:07 -04:00
let config = WebSocketConfig {
2023-01-02 16:39:28 -05:00
max_send_queue : Some ( 1024 ) ,
2022-09-06 07:12:07 -04:00
max_message_size : settings . limits . max_ws_message_bytes ,
max_frame_size : settings . limits . max_ws_frame_bytes ,
.. Default ::default ( )
} ;
2022-09-06 06:56:04 -04:00
//create a websocket stream from the upgraded object
let ws_stream = WebSocketStream ::from_raw_socket (
//pass the upgraded object
//as the base layer stream of the Websocket
upgraded ,
tokio_tungstenite ::tungstenite ::protocol ::Role ::Server ,
Some ( config ) ,
)
. await ;
2022-12-22 17:55:53 -05:00
let origin = get_header_string ( " origin " , request . headers ( ) ) ;
2022-11-05 11:29:25 -04:00
let user_agent = get_header_string ( " user-agent " , request . headers ( ) ) ;
2022-11-04 19:05:01 -04:00
// determine the remote IP from headers if the exist
let header_ip = settings
. network
. remote_ip_header
. as_ref ( )
2022-11-04 20:33:50 -04:00
. and_then ( | x | get_header_string ( x , request . headers ( ) ) ) ;
2022-11-04 19:05:01 -04:00
// use the socket addr as a backup
2022-11-03 14:01:38 -04:00
let remote_ip =
2022-11-04 19:05:01 -04:00
header_ip . unwrap_or_else ( | | remote_addr . ip ( ) . to_string ( ) ) ;
2022-11-05 11:29:25 -04:00
let client_info = ClientInfo {
remote_ip ,
user_agent ,
2022-12-22 17:55:53 -05:00
origin ,
2022-11-05 11:29:25 -04:00
} ;
2022-11-04 19:05:01 -04:00
// spawn a nostr server with our websocket
2022-09-06 06:56:04 -04:00
tokio ::spawn ( nostr_server (
2023-01-22 10:49:49 -05:00
repo ,
2022-11-05 11:29:25 -04:00
client_info ,
settings ,
ws_stream ,
broadcast ,
event_tx ,
2022-11-02 19:33:44 -04:00
shutdown ,
2022-09-06 06:56:04 -04:00
) ) ;
}
2022-11-02 19:33:44 -04:00
// todo: trace, don't print...
2022-09-06 06:56:04 -04:00
Err ( e ) = > println! (
" error when trying to upgrade connection \
from address { } to websocket connection . \
Error is : { } " ,
remote_addr , e
) ,
}
} ) ;
//return the response to the handshake request
response
}
Err ( error ) = > {
warn! ( " websocket response failed " ) ;
let mut res =
Response ::new ( Body ::from ( format! ( " Failed to create websocket: {} " , error ) ) ) ;
* res . status_mut ( ) = StatusCode ::BAD_REQUEST ;
return Ok ( res ) ;
}
} ;
Ok ::< _ , Infallible > ( response )
}
// Request for Relay info
( " / " , false ) = > {
// handle request at root with no upgrade header
// Check if this is a nostr server info request
let accept_header = & request . headers ( ) . get ( ACCEPT ) ;
// check if application/nostr+json is included
if let Some ( media_types ) = accept_header {
if let Ok ( mt_str ) = media_types . to_str ( ) {
if mt_str . contains ( " application/nostr+json " ) {
// build a relay info response
debug! ( " Responding to server info request " ) ;
2022-09-06 07:12:07 -04:00
let rinfo = RelayInfo ::from ( settings . info ) ;
2022-09-06 06:56:04 -04:00
let b = Body ::from ( serde_json ::to_string_pretty ( & rinfo ) . unwrap ( ) ) ;
return Ok ( Response ::builder ( )
. status ( 200 )
. header ( " Content-Type " , " application/nostr+json " )
. header ( " Access-Control-Allow-Origin " , " * " )
. body ( b )
. unwrap ( ) ) ;
}
}
}
Ok ( Response ::builder ( )
. status ( 200 )
. header ( " Content-Type " , " text/plain " )
. body ( Body ::from ( " Please use a Nostr client to connect. " ) )
. unwrap ( ) )
}
( _ , _ ) = > {
//handle any other url
Ok ( Response ::builder ( )
. status ( StatusCode ::NOT_FOUND )
. body ( Body ::from ( " Nothing here. " ) )
. unwrap ( ) )
}
}
}
2022-11-04 20:33:50 -04:00
fn get_header_string ( header : & str , headers : & HeaderMap ) -> Option < String > {
2022-11-04 19:05:01 -04:00
headers
. get ( header )
2023-01-22 10:49:49 -05:00
. and_then ( | x | x . to_str ( ) . ok ( ) . map ( std ::string ::ToString ::to_string ) )
2022-11-03 14:01:38 -04:00
}
2022-09-06 07:12:07 -04:00
// return on a control-c or internally requested shutdown signal
async fn ctrl_c_or_signal ( mut shutdown_signal : Receiver < ( ) > ) {
2022-09-28 08:20:31 -04:00
let mut term_signal = tokio ::signal ::unix ::signal ( tokio ::signal ::unix ::SignalKind ::terminate ( ) )
. expect ( " could not define signal " ) ;
2022-09-06 07:12:07 -04:00
loop {
tokio ::select! {
2022-09-28 08:20:31 -04:00
_ = shutdown_signal . recv ( ) = > {
2022-09-06 07:12:07 -04:00
info! ( " Shutting down webserver as requested " ) ;
// server shutting down, exit loop
break ;
} ,
_ = tokio ::signal ::ctrl_c ( ) = > {
info! ( " Shutting down webserver due to SIGINT " ) ;
break ;
2022-09-28 08:20:31 -04:00
} ,
_ = term_signal . recv ( ) = > {
info! ( " Shutting down webserver due to SIGTERM " ) ;
break ;
} ,
2022-09-06 07:12:07 -04:00
}
}
2022-09-06 06:56:04 -04:00
}
/// Start running a Nostr relay server.
2023-01-22 10:49:49 -05:00
pub fn start_server ( settings : & Settings , shutdown_rx : MpscReceiver < ( ) > ) -> Result < ( ) , Error > {
2022-09-06 06:56:04 -04:00
trace! ( " Config: {:?} " , settings ) ;
// do some config validation.
if ! Path ::new ( & settings . database . data_directory ) . is_dir ( ) {
error! ( " Database directory does not exist " ) ;
return Err ( Error ::DatabaseDirError ) ;
}
let addr = format! (
" {}:{} " ,
settings . network . address . trim ( ) ,
settings . network . port
) ;
let socket_addr = addr . parse ( ) . expect ( " listening address not valid " ) ;
// address whitelisting settings
if let Some ( addr_whitelist ) = & settings . authorization . pubkey_whitelist {
info! (
" Event publishing restricted to {} pubkey(s) " ,
addr_whitelist . len ( )
) ;
}
// check if NIP-05 enforced user verification is on
if settings . verified_users . is_active ( ) {
info! (
" NIP-05 user verification mode:{:?} " ,
settings . verified_users . mode
) ;
if let Some ( d ) = settings . verified_users . verify_update_duration ( ) {
info! ( " NIP-05 check user verification every: {:?} " , d ) ;
}
if let Some ( d ) = settings . verified_users . verify_expiration_duration ( ) {
info! ( " NIP-05 user verification expires after: {:?} " , d ) ;
}
if let Some ( wl ) = & settings . verified_users . domain_whitelist {
info! ( " NIP-05 domain whitelist: {:?} " , wl ) ;
}
if let Some ( bl ) = & settings . verified_users . domain_blacklist {
info! ( " NIP-05 domain blacklist: {:?} " , bl ) ;
}
}
// configure tokio runtime
let rt = Builder ::new_multi_thread ( )
. enable_all ( )
2022-12-25 11:45:41 -05:00
. thread_name_fn ( | | {
// give each thread a unique numeric name
static ATOMIC_ID : std ::sync ::atomic ::AtomicUsize = std ::sync ::atomic ::AtomicUsize ::new ( 0 ) ;
let id = ATOMIC_ID . fetch_add ( 1 , Ordering ::SeqCst ) ;
format! ( " tokio-ws- {} " , id )
} )
2022-12-18 10:14:04 -05:00
// limit concurrent SQLite blocking threads
. max_blocking_threads ( settings . limits . max_blocking_threads )
. on_thread_start ( | | {
2022-12-25 11:45:41 -05:00
trace! ( " started new thread: {:?} " , std ::thread ::current ( ) . name ( ) ) ;
2022-12-18 10:14:04 -05:00
} )
. on_thread_stop ( | | {
2022-12-25 11:45:41 -05:00
trace! ( " stopped thread: {:?} " , std ::thread ::current ( ) . name ( ) ) ;
2022-12-18 10:14:04 -05:00
} )
2022-09-06 06:56:04 -04:00
. build ( )
. unwrap ( ) ;
// start tokio
rt . block_on ( async {
2022-09-06 07:12:07 -04:00
let broadcast_buffer_limit = settings . limits . broadcast_buffer ;
let persist_buffer_limit = settings . limits . event_persist_buffer ;
let verified_users_active = settings . verified_users . is_active ( ) ;
let settings = settings . clone ( ) ;
2022-09-06 06:56:04 -04:00
info! ( " listening on: {} " , socket_addr ) ;
// all client-submitted valid events are broadcast to every
// other client on this channel. This should be large enough
// to accomodate slower readers (messages are dropped if
// clients can not keep up).
let ( bcast_tx , _ ) = broadcast ::channel ::< Event > ( broadcast_buffer_limit ) ;
// validated events that need to be persisted are sent to the
// database on via this channel.
let ( event_tx , event_rx ) = mpsc ::channel ::< SubmittedEvent > ( persist_buffer_limit ) ;
// establish a channel for letting all threads now about a
// requested server shutdown.
let ( invoke_shutdown , shutdown_listen ) = broadcast ::channel ::< ( ) > ( 1 ) ;
// create a channel for sending any new metadata event. These
// will get processed relatively slowly (a potentially
// multi-second blocking HTTP call) on a single thread, so we
// buffer requests on the channel. No harm in dropping events
// here, since we are protecting against DoS. This can make
// it difficult to setup initial metadata in bulk, since
// overwhelming this will drop events and won't register
// metadata events.
let ( metadata_tx , metadata_rx ) = broadcast ::channel ::< Event > ( 4096 ) ;
2023-01-22 10:49:49 -05:00
// build a repository for events
let repo = db ::build_repo ( & settings ) . await ;
// start the database writer task. Give it a channel for
2022-09-06 06:56:04 -04:00
// writing events, and for publishing events that have been
// written (to all connected clients).
2023-01-22 10:49:49 -05:00
tokio ::task ::spawn (
db ::db_writer (
repo . clone ( ) ,
settings . clone ( ) ,
event_rx ,
bcast_tx . clone ( ) ,
metadata_tx . clone ( ) ,
shutdown_listen ,
) ) ;
2022-09-06 06:56:04 -04:00
info! ( " db writer created " ) ;
2022-09-11 12:01:36 -04:00
// create a nip-05 verifier thread; if enabled.
if settings . verified_users . mode ! = VerifiedUsersMode ::Disabled {
let verifier_opt =
2023-01-22 10:49:49 -05:00
nip05 ::Verifier ::new ( repo . clone ( ) , metadata_rx , bcast_tx . clone ( ) , settings . clone ( ) ) ;
2022-09-11 12:01:36 -04:00
if let Ok ( mut v ) = verifier_opt {
if verified_users_active {
tokio ::task ::spawn ( async move {
info! ( " starting up NIP-05 verifier... " ) ;
v . run ( ) . await ;
} ) ;
}
2022-09-06 06:56:04 -04:00
}
}
2022-12-22 16:16:21 -05:00
2022-09-06 07:12:07 -04:00
// listen for (external to tokio) shutdown request
let controlled_shutdown = invoke_shutdown . clone ( ) ;
tokio ::spawn ( async move {
info! ( " control message listener started " ) ;
2023-01-22 10:49:49 -05:00
// we only have good "shutdown" messages propagation from this-> controlled shutdown. Not from controlled_shutdown-> this. Which means we have a task that is stuck waiting on a sync receive. recv is blocking, and this is async.
2022-09-06 07:12:07 -04:00
match shutdown_rx . recv ( ) {
Ok ( ( ) ) = > {
info! ( " control message requesting shutdown " ) ;
controlled_shutdown . send ( ( ) ) . ok ( ) ;
2023-01-22 10:49:49 -05:00
} ,
2022-09-06 07:12:07 -04:00
Err ( std ::sync ::mpsc ::RecvError ) = > {
2023-01-22 10:49:49 -05:00
trace! ( " shutdown requestor is disconnected (this is normal) " ) ;
2022-09-06 07:12:07 -04:00
}
} ;
} ) ;
// listen for ctrl-c interruupts
2022-09-06 06:56:04 -04:00
let ctrl_c_shutdown = invoke_shutdown . clone ( ) ;
2022-09-11 13:44:45 -04:00
// listener for webserver shutdown
let webserver_shutdown_listen = invoke_shutdown . subscribe ( ) ;
2022-09-06 06:56:04 -04:00
tokio ::spawn ( async move {
tokio ::signal ::ctrl_c ( ) . await . unwrap ( ) ;
2022-09-06 07:12:07 -04:00
info! ( " shutting down due to SIGINT (main) " ) ;
2022-09-06 06:56:04 -04:00
ctrl_c_shutdown . send ( ( ) ) . ok ( ) ;
} ) ;
2022-12-25 11:43:47 -05:00
// spawn a task to check the pool size.
2023-01-22 10:49:49 -05:00
//let pool_monitor = pool.clone();
//tokio::spawn(async move {db::monitor_pool("reader", pool_monitor).await;});
2022-12-25 11:43:47 -05:00
2022-09-06 06:56:04 -04:00
// A `Service` is needed for every connection, so this
// creates one from our `handle_request` function.
let make_svc = make_service_fn ( | conn : & AddrStream | {
2023-01-22 10:49:49 -05:00
let repo = repo . clone ( ) ;
2022-09-06 06:56:04 -04:00
let remote_addr = conn . remote_addr ( ) ;
let bcast = bcast_tx . clone ( ) ;
let event = event_tx . clone ( ) ;
let stop = invoke_shutdown . clone ( ) ;
2022-09-06 07:12:07 -04:00
let settings = settings . clone ( ) ;
2022-09-06 06:56:04 -04:00
async move {
// service_fn converts our function into a `Service`
Ok ::< _ , Infallible > ( service_fn ( move | request : Request < Body > | {
handle_web_request (
request ,
2023-01-22 10:49:49 -05:00
repo . clone ( ) ,
2022-09-06 07:12:07 -04:00
settings . clone ( ) ,
2022-09-06 06:56:04 -04:00
remote_addr ,
bcast . clone ( ) ,
event . clone ( ) ,
stop . subscribe ( ) ,
)
} ) )
}
} ) ;
let server = Server ::bind ( & socket_addr )
. serve ( make_svc )
2022-09-11 13:44:45 -04:00
. with_graceful_shutdown ( ctrl_c_or_signal ( webserver_shutdown_listen ) ) ;
// run hyper in this thread. This is why the thread does not return.
2022-09-06 06:56:04 -04:00
if let Err ( e ) = server . await {
eprintln! ( " server error: {} " , e ) ;
}
} ) ;
Ok ( ( ) )
}
/// Nostr protocol messages from a client
///
/// Deserialized with `#[serde(untagged)]`: an incoming JSON array is tried
/// against each variant in declaration order until one matches.
#[derive(Deserialize, Serialize, Clone, PartialEq, Eq, Debug)]
#[serde(untagged)]
pub enum NostrMessage {
    /// An `EVENT` message (client publishing a new event)
    EventMsg(EventCmd),
    /// A `REQ` message (client opening a subscription)
    SubMsg(Subscription),
    /// A `CLOSE` message (client cancelling a subscription)
    CloseMsg(CloseCmd),
}
2023-01-22 10:49:49 -05:00
/// Convert Message to `NostrMessage`
fn convert_to_msg ( msg : & str , max_bytes : Option < usize > ) -> Result < NostrMessage > {
let parsed_res : Result < NostrMessage > = serde_json ::from_str ( msg ) . map_err ( std ::convert ::Into ::into ) ;
2022-09-06 06:56:04 -04:00
match parsed_res {
Ok ( m ) = > {
2023-01-09 23:12:20 -05:00
if let NostrMessage ::SubMsg ( _ ) = m {
2023-01-11 17:56:40 -05:00
// note; this only prints the first 16k of a REQ and then truncates.
trace! ( " REQ: {:?} " , msg ) ;
2023-01-09 23:12:20 -05:00
} ;
2022-09-06 06:56:04 -04:00
if let NostrMessage ::EventMsg ( _ ) = m {
2022-09-06 07:12:07 -04:00
if let Some ( max_size ) = max_bytes {
2022-09-06 06:56:04 -04:00
// check length, ensure that some max size is set.
if msg . len ( ) > max_size & & max_size > 0 {
return Err ( Error ::EventMaxLengthError ( msg . len ( ) ) ) ;
}
}
}
Ok ( m )
}
Err ( e ) = > {
2023-01-10 08:41:49 -05:00
trace! ( " proto parse error: {:?} " , e ) ;
trace! ( " parse error on message: {:?} " , msg . trim ( ) ) ;
2022-09-06 06:56:04 -04:00
Err ( Error ::ProtoParseError )
}
}
}
2023-01-22 10:49:49 -05:00
/// Turn a string into a NOTICE message ready to send over a `WebSocket`
fn make_notice_message ( notice : & Notice ) -> Message {
2022-11-10 17:27:10 -05:00
let json = match notice {
Notice ::Message ( ref msg ) = > json! ( [ " NOTICE " , msg ] ) ,
Notice ::EventResult ( ref res ) = > json! ( [ " OK " , res . id , res . status . to_bool ( ) , res . msg ] ) ,
} ;
Message ::text ( json . to_string ( ) )
2022-09-06 06:56:04 -04:00
}
/// Connection metadata captured during the HTTP upgrade in
/// `handle_web_request` and handed to `nostr_server`.
struct ClientInfo {
    // client IP, taken from the configured `remote_ip_header` when
    // present, otherwise from the socket address
    remote_ip: String,
    // value of the `user-agent` request header, if any
    user_agent: Option<String>,
    // value of the `origin` request header, if any
    origin: Option<String>,
}
/// Handle new client connections. This runs through an event loop
/// for all client communication.
///
/// One task per websocket client.  The loop multiplexes: server shutdown,
/// keepalive pings, NOTICE delivery, database query results, events
/// broadcast from other clients, and inbound client messages.  Exiting the
/// loop (disconnect, error, or shutdown) cancels any still-running queries.
async fn nostr_server(
    repo: Arc<dyn NostrRepo>,
    client_info: ClientInfo,
    settings: Settings,
    mut ws_stream: WebSocketStream<Upgraded>,
    broadcast: Sender<Event>,
    event_tx: mpsc::Sender<SubmittedEvent>,
    mut shutdown: Receiver<()>,
) {
    // the time this websocket nostr server started
    let orig_start = Instant::now();
    // get a broadcast channel for clients to communicate on
    let mut bcast_rx = broadcast.subscribe();
    // Track internal client state
    let mut conn = conn::ClientConn::new(client_info.remote_ip);
    // subscription creation rate limiting (None = unlimited)
    let mut sub_lim_opt = None;
    // 100ms jitter when the rate limiter returns
    let jitter = Jitter::up_to(Duration::from_millis(100));
    let sub_per_min_setting = settings.limits.subscriptions_per_min;
    if let Some(sub_per_min) = sub_per_min_setting {
        if sub_per_min > 0 {
            trace!("Rate limits for sub creation ({}/min)", sub_per_min);
            // unwrap is safe: sub_per_min > 0 was checked just above
            let quota_time = core::num::NonZeroU32::new(sub_per_min).unwrap();
            let quota = Quota::per_minute(quota_time);
            sub_lim_opt = Some(RateLimiter::direct(quota));
        }
    }
    // Use the remote IP as the client identifier
    let cid = conn.get_client_prefix();
    // Create a channel for receiving query results from the database.
    // we will send out the tx handle to any query we generate.
    // this has capacity for some of the larger requests we see, which
    // should allow the DB thread to release the handle earlier.
    let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(20000);
    // Create channel for receiving NOTICEs
    let (notice_tx, mut notice_rx) = mpsc::channel::<Notice>(128);
    // last time this client sent data (message, ping, etc.)
    let mut last_message_time = Instant::now();
    // ping interval, from configuration
    let default_ping_dur = Duration::from_secs(settings.network.ping_interval_seconds.into());
    // disconnect after 20 minutes without a ping response or event.
    let max_quiet_time = Duration::from_secs(60 * 20);
    let start = tokio::time::Instant::now() + default_ping_dur;
    let mut ping_interval = tokio::time::interval_at(start, default_ping_dur);
    // maintain a hashmap of a oneshot channel for active subscriptions.
    // when these subscriptions are cancelled, make a message
    // available to the executing query so it knows to stop.
    let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
    // for stats, keep track of how many events the client published,
    // and how many it received from queries.
    let mut client_published_event_count: usize = 0;
    let mut client_received_event_count: usize = 0;
    debug!("new client connection (cid: {}, ip: {:?})", cid, conn.ip());
    let origin = client_info.origin.unwrap_or_else(|| "<unspecified>".into());
    let user_agent = client_info
        .user_agent
        .unwrap_or_else(|| "<unspecified>".into());
    debug!(
        "cid: {}, origin: {:?}, user-agent: {:?}",
        cid, origin, user_agent
    );
    loop {
        tokio::select! {
            _ = shutdown.recv() => {
                info!("Close connection down due to shutdown, client: {}, ip: {:?}, connected: {:?}", cid, conn.ip(), orig_start.elapsed());
                // server shutting down, exit loop
                break;
            },
            _ = ping_interval.tick() => {
                // check how long since we talked to client
                // if it has been too long, disconnect
                if last_message_time.elapsed() > max_quiet_time {
                    debug!("ending connection due to lack of client ping response");
                    break;
                }
                // Send a ping
                ws_stream.send(Message::Ping(Vec::new())).await.ok();
            },
            Some(notice_msg) = notice_rx.recv() => {
                // relay a NOTICE/OK produced elsewhere (e.g. the db writer)
                ws_stream.send(make_notice_message(&notice_msg)).await.ok();
            },
            Some(query_result) = query_rx.recv() => {
                // database informed us of a query result we asked for
                let subesc = query_result.sub_id.replace('"', "");
                if query_result.event == "EOSE" {
                    let send_str = format!("[\"EOSE\",\"{}\"]", subesc);
                    ws_stream.send(Message::Text(send_str)).await.ok();
                } else {
                    client_received_event_count += 1;
                    // send a result
                    let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
                    ws_stream.send(Message::Text(send_str)).await.ok();
                }
            },
            // TODO: consider logging the LaggedRecv error
            Ok(global_event) = bcast_rx.recv() => {
                // an event has been broadcast to all clients
                // first check if there is a subscription for this event.
                for (s, sub) in conn.subscriptions() {
                    if !sub.interested_in_event(&global_event) {
                        continue;
                    }
                    // TODO: serialize at broadcast time, instead of
                    // once for each consumer.
                    if let Ok(event_str) = serde_json::to_string(&global_event) {
                        trace!("sub match for client: {}, sub: {:?}, event: {:?}",
                               cid, s,
                               global_event.get_event_id_prefix());
                        // create an event response and send it
                        let subesc = s.replace('"', "");
                        ws_stream.send(Message::Text(format!("[\"EVENT\",\"{}\",{}]", subesc, event_str))).await.ok();
                    } else {
                        warn!("could not serialize event: {:?}", global_event.get_event_id_prefix());
                    }
                }
            },
            ws_next = ws_stream.next() => {
                // update most recent message time for client
                last_message_time = Instant::now();
                // Consume text messages from the client, parse into Nostr messages.
                let nostr_msg = match ws_next {
                    Some(Ok(Message::Text(m))) => {
                        convert_to_msg(&m, settings.limits.max_event_bytes)
                    },
                    Some(Ok(Message::Binary(_))) => {
                        ws_stream.send(
                            make_notice_message(&Notice::message("binary messages are not accepted".into()))).await.ok();
                        continue;
                    },
                    Some(Ok(Message::Ping(_) | Message::Pong(_))) => {
                        // get a ping/pong, ignore.  tungstenite will
                        // send responses automatically.
                        continue;
                    },
                    Some(Err(WsError::Capacity(MessageTooLong { size, max_size }))) => {
                        ws_stream.send(
                            make_notice_message(&Notice::message(format!("message too large ({} > {})", size, max_size)))).await.ok();
                        continue;
                    },
                    // normal closure paths: client closed, stream ended,
                    // or the peer reset without a closing handshake.
                    None |
                    Some(Ok(Message::Close(_)) |
                         Err(WsError::AlreadyClosed | WsError::ConnectionClosed |
                             WsError::Protocol(tungstenite::error::ProtocolError::ResetWithoutClosingHandshake)))
                        => {
                            debug!("websocket close from client (cid: {}, ip: {:?})", cid, conn.ip());
                            break;
                        },
                    Some(Err(WsError::Io(e))) => {
                        // IO errors are considered fatal
                        warn!("IO error (cid: {}, ip: {:?}): {:?}", cid, conn.ip(), e);
                        break;
                    }
                    x => {
                        // default condition on error is to close the client connection
                        info!("unknown error (cid: {}, ip: {:?}): {:?} (closing conn)", cid, conn.ip(), x);
                        break;
                    }
                };
                // convert ws_next into proto_next
                match nostr_msg {
                    Ok(NostrMessage::EventMsg(ec)) => {
                        // An EventCmd needs to be validated to be converted into an Event
                        // handle each type of message
                        let evid = ec.event_id().to_owned();
                        let parsed: Result<Event> = Result::<Event>::from(ec);
                        match parsed {
                            Ok(e) => {
                                let id_prefix: String = e.id.chars().take(8).collect();
                                debug!("successfully parsed/validated event: {:?} (cid: {})", id_prefix, cid);
                                // check if the event is too far in the future.
                                if e.is_valid_timestamp(settings.options.reject_future_seconds) {
                                    // Write this to the database.
                                    let submit_event = SubmittedEvent { event: e.clone(), notice_tx: notice_tx.clone() };
                                    event_tx.send(submit_event).await.ok();
                                    client_published_event_count += 1;
                                } else {
                                    info!("client: {} sent a far future-dated event", cid);
                                    if let Some(fut_sec) = settings.options.reject_future_seconds {
                                        let msg = format!("The event created_at field is out of the acceptable range (+{}sec) for this relay.", fut_sec);
                                        let notice = Notice::invalid(e.id, &msg);
                                        ws_stream.send(make_notice_message(&notice)).await.ok();
                                    }
                                }
                            },
                            Err(e) => {
                                info!("client sent an invalid event (cid: {})", cid);
                                ws_stream.send(make_notice_message(&Notice::invalid(evid, &format!("{}", e)))).await.ok();
                            }
                        }
                    },
                    Ok(NostrMessage::SubMsg(s)) => {
                        debug!("subscription requested (cid: {}, sub: {:?})", cid, s.id);
                        // subscription handling consists of:
                        // * check for rate limits
                        // * registering the subscription so future events can be matched
                        // * making a channel to cancel to request later
                        // * sending a request for a SQL query
                        // Do nothing if the sub already exists.
                        if conn.has_subscription(&s) {
                            info!("client sent duplicate subscription, ignoring (cid: {}, sub: {:?})", cid, s.id);
                        } else {
                            // apply the per-minute creation rate limit, if configured
                            if let Some(ref lim) = sub_lim_opt {
                                lim.until_ready_with_jitter(jitter).await;
                            }
                            let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>();
                            match conn.subscribe(s.clone()) {
                                Ok(()) => {
                                    // when we insert, if there was a previous query running with the same name, cancel it.
                                    if let Some(previous_query) = running_queries.insert(s.id.clone(), abandon_query_tx) {
                                        previous_query.send(()).ok();
                                    }
                                    if s.needs_historical_events() {
                                        // start a database query.  this spawns a blocking database query on a worker thread.
                                        repo.query_subscription(s, cid.clone(), query_tx.clone(), abandon_query_rx).await.ok();
                                    }
                                },
                                Err(e) => {
                                    info!("Subscription error: {} (cid: {}, sub: {:?})", e, cid, s.id);
                                    ws_stream.send(make_notice_message(&Notice::message(format!("Subscription error: {}", e)))).await.ok();
                                }
                            }
                        }
                    },
                    Ok(NostrMessage::CloseMsg(cc)) => {
                        // closing a request simply removes the subscription.
                        let parsed: Result<Close> = Result::<Close>::from(cc);
                        if let Ok(c) = parsed {
                            // check if a query is currently
                            // running, and remove it if so.
                            let stop_tx = running_queries.remove(&c.id);
                            if let Some(tx) = stop_tx {
                                tx.send(()).ok();
                            }
                            // stop checking new events against
                            // the subscription
                            conn.unsubscribe(&c);
                        } else {
                            info!("invalid command ignored");
                            ws_stream.send(make_notice_message(&Notice::message("could not parse command".into()))).await.ok();
                        }
                    },
                    Err(Error::ConnError) => {
                        debug!("got connection close/error, disconnecting cid: {}, ip: {:?}", cid, conn.ip());
                        break;
                    }
                    Err(Error::EventMaxLengthError(s)) => {
                        info!("client sent event larger ({} bytes) than max size (cid: {})", s, cid);
                        ws_stream.send(make_notice_message(&Notice::message("event exceeded max size".into()))).await.ok();
                    },
                    Err(Error::ProtoParseError) => {
                        info!("client sent event that could not be parsed (cid: {})", cid);
                        ws_stream.send(make_notice_message(&Notice::message("could not parse command".into()))).await.ok();
                    },
                    Err(e) => {
                        info!("got non-fatal error from client (cid: {}, error: {:?}", cid, e);
                    },
                }
            },
        }
    }
    // connection cleanup - ensure any still running queries are terminated.
    for (_, stop_tx) in running_queries {
        stop_tx.send(()).ok();
    }
    info!(
        "stopping client connection (cid: {}, ip: {:?}, sent: {} events, recv: {} events, connected: {:?})",
        cid,
        conn.ip(),
        client_published_event_count,
        client_received_event_count,
        orig_start.elapsed()
    );
}