mirror of https://github.com/scsibug/nostr-rs-relay.git

Merge pull request #7 from scsibug/master
Fork Sync: Update from parent repository

commit f6f90b535e
@@ -202,6 +202,9 @@ reject_future_seconds = 1800
 # LNBits api secret
 #api_secret = "<ln bits api>"
 
+# Nostr direct message on signup
+#direct_message=true
+
 # Terms of service
 #terms_message = """
 #This service (and supporting services) are provided "as is", without warranty of any kind, express or implied.
@@ -223,4 +226,6 @@ reject_future_seconds = 1800
 
 # Whether or not new sign ups should be allowed
 #sign_ups = false
+
+# optional if `direct_message=false`
 #secret_key = "<nostr nsec>"
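The two config.toml hunks above add a `direct_message` switch and make the pay-to-relay `secret_key` optional. As a minimal sketch of how such optional keys behave when deserialized, assuming the `serde` and `toml` crates (the `PayToRelayOptions` struct below is illustrative only; the relay's real `PayToRelay` type appears in a later hunk of this commit):

```rust
use serde::Deserialize;

// Illustrative only -- not the relay's actual config type.
#[derive(Deserialize, Debug)]
struct PayToRelayOptions {
    #[serde(default)]
    direct_message: bool,
    // An absent key deserializes to None, matching the
    // "optional if `direct_message=false`" comment in config.toml.
    #[serde(default)]
    secret_key: Option<String>,
}

fn main() {
    let toml_src = "direct_message = false\n";
    let opts: PayToRelayOptions = toml::from_str(toml_src).unwrap();
    println!("{opts:?}");
    // PayToRelayOptions { direct_message: false, secret_key: None }
}
```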
@@ -10,7 +10,7 @@ and reduce spam and abuse.
 
 This will likely evolve substantially, the first goal is to get a
 basic one-way service that lets an externalized program decide on
-event persistance. This does not represent the final state of gRPC
+event persistence. This does not represent the final state of gRPC
 extensibility in `nostr-rs-relay`.
 
 ## Considerations
@@ -33,7 +33,7 @@ The LNBits instance must have a signed HTTPS a self signed certificate will not
 
 ### Concepts
 
-All authors are initially not admitted to write to the relay. There are two ways to gain access write to the relay. The first is by attempting to post the the relay, upon receiving an event from an author that is not admitted, the relay will send a direct message including the terms of service of the relay and a lighting invoice for the admission cost. Once this invoice is payed the author can write to the relay. For this method to work the author must be reading from the relay. An author can also pay and accept the terms of service via a webpage `https://<relay-url>/join`.
+All authors are initially not admitted to write to the relay. There are two ways to gain access write to the relay. The first is by attempting to post the the relay, upon receiving an event from an author that is not admitted, the relay will send a direct message including the terms of service of the relay and a lighting invoice for the admission cost. Once this invoice is paid the author can write to the relay. For this method to work the author must be reading from the relay. An author can also pay and accept the terms of service via a webpage `https://<relay-url>/join`.
 
 ## Design Details
 
@@ -82,4 +82,3 @@ simply to demonstrate a mitigation is possible.
 *Threat*: An attacker gains write access by paying the admission fee, and then floods the relay with a large number of spam events.
 
 *Mitigation*: The attacker's admission can be revoked and their admission fee will not be refunded. Enabling "cost per event" and increasing the admission cost can also discourage this type of behavior.
-
@@ -117,9 +117,9 @@ Assumptions:
 
 * `Traefik` version is `2.9` (other versions not tested).
 * `Traefik` is used for provisioning of Let's Encrypt certificates.
-* `Traefik` is running in `Docker`, using `docker compose` and labels for the static configuration. An equivalent setup useing a Traefik config file is possible too (but not covered here).
+* `Traefik` is running in `Docker`, using `docker compose` and labels for the static configuration. An equivalent setup using a Traefik config file is possible too (but not covered here).
 * Strict Transport Security is enabled.
-* Hostname for the relay is `relay.example.com`, email adres for ACME certificates provider is `name@example.com`.
+* Hostname for the relay is `relay.example.com`, email address for ACME certificates provider is `name@example.com`.
 * ipv6 is enabled, a viable private ipv6 subnet is specified in the example below.
 * Relay is running on port `8080`.
 
@@ -12,7 +12,7 @@ Start by building the application from source. Here is how to do that:
 3. `cargo build --release`
 
 ### Place the files where they belong
-We want to palce the nostr-rs-relay binary and the config.toml file where they belong. While still in the root level of the nostr-rs-relay folder you cloned in last step, run the following commands:
+We want to place the nostr-rs-relay binary and the config.toml file where they belong. While still in the root level of the nostr-rs-relay folder you cloned in last step, run the following commands:
 1. `sudo cp target/release/nostr-rs-relay /usr/local/bin/`
 2. `sudo mkdir /etc/nostr-rs-relay`
 2. `sudo cp config.toml /etc/nostr-rs-relay`
@@ -179,7 +179,7 @@ attempts to persist them to disk. Once validated and persisted, these
 events are broadcast to all subscribers.
 
 When verification is enabled, the writer must check to ensure a valid,
-unexpired verification record exists for the auther. All metadata
+unexpired verification record exists for the author. All metadata
 events (regardless of verification status) are forwarded to a verifier
 module. If the verifier determines a new verification record is
 needed, it is also responsible for persisting and broadcasting the
@@ -93,8 +93,9 @@ pub struct PayToRelay {
     pub node_url: String,
     pub api_secret: String,
     pub terms_message: String,
     pub sign_ups: bool, // allow new users to sign up to relay
-    pub secret_key: String,
+    pub direct_message: bool, // Send direct message to user with invoice and terms
+    pub secret_key: Option<String>,
     pub processor: Processor,
 }
 
@@ -243,7 +244,14 @@ impl Settings {
             // Should check that url is valid
             assert_ne!(settings.pay_to_relay.node_url, "");
             assert_ne!(settings.pay_to_relay.terms_message, "");
-            assert_ne!(settings.pay_to_relay.secret_key, "");
+
+            if settings.pay_to_relay.direct_message {
+                assert_ne!(
+                    settings.pay_to_relay.secret_key,
+                    Some("<nostr nsec>".to_string())
+                );
+                assert!(settings.pay_to_relay.secret_key.is_some());
+            }
         }
 
         Ok(settings)
@@ -306,7 +314,8 @@ impl Default for Settings {
                 node_url: "".to_string(),
                 api_secret: "".to_string(),
                 sign_ups: false,
-                secret_key: "".to_string(),
+                direct_message: true,
+                secret_key: None,
                 processor: Processor::LNBits,
             },
             verified_users: VerifiedUsers {
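The new checks in `Settings` fire via `assert_ne!`/`assert!`, so a misconfigured `direct_message` setup panics at startup. As a hedged sketch only (not how the relay's config code is written), the same rule could be expressed as a `Result` for callers that prefer an error over a panic:

```rust
// Illustrative stand-alone version of the rule enforced above: when direct
// messages are enabled, a real (non-placeholder) secret key must be set.
fn validate_pay_to_relay(direct_message: bool, secret_key: &Option<String>) -> Result<(), String> {
    if direct_message {
        match secret_key.as_deref() {
            None => return Err("direct_message = true requires secret_key".to_string()),
            Some("<nostr nsec>") => return Err("secret_key is still the placeholder".to_string()),
            Some(_) => {}
        }
    }
    Ok(())
}

fn main() {
    assert!(validate_pay_to_relay(false, &None).is_ok());
    assert!(validate_pay_to_relay(true, &None).is_err());
    assert!(validate_pay_to_relay(true, &Some("<nostr nsec>".to_string())).is_err());
    assert!(validate_pay_to_relay(true, &Some("nsec1example".to_string())).is_ok());
}
```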
@@ -204,7 +204,7 @@ impl ClientConn {
             }
         }
 
-        match (relay.and_then(|url| host_str(url)), host_str(relay_url)) {
+        match (relay.and_then(host_str), host_str(relay_url)) {
             (Some(received_relay), Some(our_relay)) => {
                 if received_relay != our_relay {
                     return Err(Error::AuthFailure);
@@ -45,8 +45,8 @@ pub const DB_FILE: &str = "nostr.db";
 /// Will panic if the pool could not be created.
 pub async fn build_repo(settings: &Settings, metrics: NostrMetrics) -> Arc<dyn NostrRepo> {
     match settings.database.engine.as_str() {
-        "sqlite" => Arc::new(build_sqlite_pool(&settings, metrics).await),
-        "postgres" => Arc::new(build_postgres_pool(&settings, metrics).await),
+        "sqlite" => Arc::new(build_sqlite_pool(settings, metrics).await),
+        "postgres" => Arc::new(build_postgres_pool(settings, metrics).await),
         _ => panic!("Unknown database engine"),
     }
 }
@@ -378,7 +378,7 @@ pub async fn db_writer(
                 notice_tx
                     .try_send(Notice::blocked(
                         event.id,
-                        &decision.message().unwrap_or_else(|| "".to_string()),
+                        &decision.message().unwrap_or_default(),
                     ))
                     .ok();
                 continue;
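The `db_writer` hunk swaps an empty-string closure for `unwrap_or_default()`. A tiny std-only illustration of why the two are interchangeable for an `Option<String>` (the shorter form is the one clippy-style cleanups tend to suggest):

```rust
fn main() {
    let message: Option<String> = None;

    // What the old code spelled out with a closure...
    let with_closure = message.clone().unwrap_or_else(|| "".to_string());
    // ...and the shorter form the new code uses.
    let with_default = message.unwrap_or_default();

    assert_eq!(with_closure, with_default); // both are ""
}
```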
@@ -472,12 +472,11 @@ mod tests {
         let mut event = Event::simple_event();
         event.tags = vec![vec!["e".to_owned(), "foo".to_owned()]];
         event.build_index();
-        assert_eq!(
+        assert!(
             event.generic_tag_val_intersect(
                 'e',
                 &HashSet::from(["foo".to_owned(), "bar".to_owned()])
-            ),
-            true
+            )
         );
     }
 
@@ -14,6 +14,6 @@ pub mod notice;
 pub mod repo;
 pub mod subscription;
 pub mod utils;
-// Public API for creating relays programatically
+// Public API for creating relays programmatically
 pub mod payment;
 pub mod server;
@@ -35,7 +35,7 @@ impl std::convert::From<Nip05Name> for nauthz_grpc::event_request::Nip05Name {
     fn from(value: Nip05Name) -> Self {
         nauthz_grpc::event_request::Nip05Name {
             local: value.local.clone(),
-            domain: value.domain.clone(),
+            domain: value.domain,
         }
     }
 }
@@ -57,7 +57,7 @@ impl EventAuthzService {
         eas
     }
 
-    pub async fn ready_connection(self: &mut Self) {
+    pub async fn ready_connection(&mut self) {
         if self.conn.is_none() {
             let client = AuthorizationClient::connect(self.server_addr.to_string()).await;
             if let Err(ref msg) = client {
@@ -70,7 +70,7 @@ impl EventAuthzService {
     }
 
     pub async fn admit_event(
-        self: &mut Self,
+        &mut self,
         event: &Event,
         ip: &str,
         origin: Option<String>,
@@ -99,13 +99,13 @@ impl EventAuthzService {
                     origin,
                     user_agent,
                     auth_pubkey,
-                    nip05: nip05.map(|x| nauthz_grpc::event_request::Nip05Name::from(x)),
+                    nip05: nip05.map(nauthz_grpc::event_request::Nip05Name::from),
                 })
                 .await?;
             let reply = svr_res.into_inner();
-            return Ok(Box::new(reply));
+            Ok(Box::new(reply))
         } else {
-            return Err(Error::AuthzError);
+            Err(Error::AuthzError)
         }
     }
 }
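The gRPC-authz hunks are mechanical clippy-style cleanups: pass the conversion function itself to `map` instead of wrapping it in a closure, and let the tail expression stand as the return value instead of an explicit `return`. A std-only sketch of both idioms (the types here are made up for illustration, not the relay's gRPC types):

```rust
// Hypothetical types standing in for Nip05Name and its gRPC counterpart.
#[derive(Debug)]
struct Name(String);

#[derive(Debug)]
struct GrpcName(String);

impl From<Name> for GrpcName {
    fn from(n: Name) -> Self {
        GrpcName(n.0)
    }
}

fn convert(name: Option<Name>) -> Option<GrpcName> {
    // instead of: name.map(|x| GrpcName::from(x))
    // and:        return name.map(GrpcName::from);
    name.map(GrpcName::from)
}

fn main() {
    println!("{:?}", convert(Some(Name("alice".to_string()))));
}
```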
@@ -5,10 +5,10 @@ use hyper::Client;
 use hyper_tls::HttpsConnector;
 use nostr::Keys;
 use serde::{Deserialize, Serialize};
+use serde_json::Value;
 
 use async_trait::async_trait;
 use rand::Rng;
-use tracing::debug;
 
 use std::str::FromStr;
 use url::Url;
@@ -110,8 +110,7 @@ impl PaymentProcessor for LNBitsPaymentProcessor {
             expiry: 3600,
         };
         let url = Url::parse(&self.settings.pay_to_relay.node_url)?.join(APIPATH)?;
-        let uri = Uri::from_str(url.as_str().strip_suffix("/").unwrap_or(url.as_str())).unwrap();
-        debug!("{uri}");
+        let uri = Uri::from_str(url.as_str().strip_suffix('/').unwrap_or(url.as_str())).unwrap();
 
         let req = hyper::Request::builder()
             .method(hyper::Method::POST)
@@ -122,14 +121,10 @@ impl PaymentProcessor for LNBitsPaymentProcessor {
 
         let res = self.client.request(req).await?;
 
-        debug!("{res:?}");
-
         // Json to Struct of LNbits callback
         let body = hyper::body::to_bytes(res.into_body()).await?;
         let invoice_response: LNBitsCreateInvoiceResponse = serde_json::from_slice(&body)?;
 
-        debug!("{:?}", invoice_response);
-
         Ok(InvoiceInfo {
             pubkey: key.public_key().to_string(),
             payment_hash: invoice_response.payment_hash,
@@ -147,7 +142,6 @@ impl PaymentProcessor for LNBitsPaymentProcessor {
             .join(APIPATH)?
             .join(payment_hash)?;
         let uri = Uri::from_str(url.as_str()).unwrap();
-        debug!("{uri}");
 
         let req = hyper::Request::builder()
             .method(hyper::Method::GET)
@@ -159,13 +153,18 @@ impl PaymentProcessor for LNBitsPaymentProcessor {
         let res = self.client.request(req).await?;
         // Json to Struct of LNbits callback
         let body = hyper::body::to_bytes(res.into_body()).await?;
-        debug!("check invoice: {body:?}");
-        let invoice_response: LNBitsCheckInvoiceResponse = serde_json::from_slice(&body)?;
+        let invoice_response: Value = serde_json::from_slice(&body)?;
 
-        let status = if invoice_response.paid {
-            InvoiceStatus::Paid
+        let status = if let Ok(invoice_response) =
+            serde_json::from_value::<LNBitsCheckInvoiceResponse>(invoice_response)
+        {
+            if invoice_response.paid {
+                InvoiceStatus::Paid
+            } else {
+                InvoiceStatus::Unpaid
+            }
         } else {
-            InvoiceStatus::Unpaid
+            InvoiceStatus::Expired
         };
 
         Ok(status)
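The last hunk changes how the LNBits status response is handled: the body is parsed into a generic `serde_json::Value` first, and only then converted into the typed `LNBitsCheckInvoiceResponse`; if that conversion fails, the invoice is treated as expired rather than bubbling up a deserialization error. A self-contained sketch of the same two-stage pattern, assuming only `serde`/`serde_json` (the struct and enum names here are illustrative):

```rust
use serde::Deserialize;
use serde_json::Value;

#[derive(Deserialize)]
struct CheckInvoiceResponse {
    paid: bool,
}

#[derive(Debug, PartialEq)]
enum Status {
    Paid,
    Unpaid,
    Expired,
}

fn status_from_body(body: &str) -> Result<Status, serde_json::Error> {
    // Stage 1: any valid JSON is accepted here.
    let raw: Value = serde_json::from_str(body)?;
    // Stage 2: the typed conversion may fail (e.g. an error body); fall back
    // to a distinct status instead of returning the error.
    let status = if let Ok(resp) = serde_json::from_value::<CheckInvoiceResponse>(raw) {
        if resp.paid { Status::Paid } else { Status::Unpaid }
    } else {
        Status::Expired
    };
    Ok(status)
}

fn main() {
    assert_eq!(status_from_body(r#"{"paid": true}"#).unwrap(), Status::Paid);
    assert_eq!(status_from_body(r#"{"paid": false}"#).unwrap(), Status::Unpaid);
    assert_eq!(status_from_body(r#"{"detail": "not found"}"#).unwrap(), Status::Expired);
}
```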
@@ -25,7 +25,7 @@ pub struct Payment {
     /// Settings
     settings: crate::config::Settings,
     // Nostr Keys
-    nostr_keys: Keys,
+    nostr_keys: Option<Keys>,
     /// Payment Processor
     processor: Arc<dyn PaymentProcessor>,
 }
@@ -102,7 +102,11 @@ impl Payment {
         info!("Create payment handler");
 
         // Create nostr key from sk string
-        let nostr_keys = Keys::from_sk_str(&settings.pay_to_relay.secret_key)?;
+        let nostr_keys = if let Some(secret_key) = &settings.pay_to_relay.secret_key {
+            Some(Keys::from_sk_str(secret_key)?)
+        } else {
+            None
+        };
 
         // Create processor kind defined in settings
         let processor = match &settings.pay_to_relay.processor {
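The `if let` above keeps `?` on key parsing while allowing the key to be absent. As a hedged side note, the same shape can be written with `Option::map` plus `transpose`; `parse_key` below is a stand-in for `Keys::from_sk_str`, not the real API:

```rust
#[derive(Debug)]
struct Key(String);

// Stand-in for a fallible key parser such as Keys::from_sk_str.
fn parse_key(s: &str) -> Result<Key, String> {
    if s.is_empty() {
        Err("empty key".to_string())
    } else {
        Ok(Key(s.to_string()))
    }
}

fn keys_from_config(secret_key: &Option<String>) -> Result<Option<Key>, String> {
    // Option<&str> -> Option<Result<Key, _>> -> Result<Option<Key>, _>
    secret_key.as_deref().map(parse_key).transpose()
}

fn main() {
    println!("{:?}", keys_from_config(&None)); // Ok(None)
    println!("{:?}", keys_from_config(&Some("nsec1example".to_string()))); // Ok(Some(Key(..)))
}
```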
@@ -130,7 +134,7 @@ impl Payment {
         }
     }
 
-    /// Internal select loop for preforming payment operatons
+    /// Internal select loop for preforming payment operations
     async fn run_internal(&mut self) -> Result<()> {
         tokio::select! {
             m = self.payment_rx.recv() => {
@@ -148,7 +152,7 @@ impl Payment {
                 Ok(PaymentMessage::CheckAccount(pubkey)) => {
                     let keys = Keys::from_pk_str(&pubkey)?;
 
-                    if let Some(invoice_info) = self.repo.get_unpaid_invoice(&keys).await? {
+                    if let Ok(Some(invoice_info)) = self.repo.get_unpaid_invoice(&keys).await {
                         match self.check_invoice_status(&invoice_info.payment_hash).await? {
                             InvoiceStatus::Paid => {
                                 self.repo.admit_account(&keys, self.settings.pay_to_relay.admission_cost).await?;
@@ -158,6 +162,10 @@ impl Payment {
                                 self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok();
                             }
                         }
+                    } else {
+                        let amount = self.settings.pay_to_relay.admission_cost;
+                        let invoice_info = self.get_invoice_info(&pubkey, amount).await?;
+                        self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok();
                     }
                 }
                 Ok(PaymentMessage::InvoicePaid(payment_hash)) => {
@@ -189,6 +197,11 @@ impl Payment {
         pubkey: &str,
         invoice_info: &InvoiceInfo,
     ) -> Result<()> {
+        let nostr_keys = match &self.nostr_keys {
+            Some(key) => key,
+            None => return Err(Error::CustomError("Nostr key not defined".to_string())),
+        };
+
         // Create Nostr key from pk
         let key = Keys::from_pk_str(pubkey)?;
 
@@ -196,16 +209,16 @@ impl Payment {
 
         // Event DM with terms of service
         let message_event: NostrEvent = EventBuilder::new_encrypted_direct_msg(
-            &self.nostr_keys,
+            nostr_keys,
             pubkey,
             &self.settings.pay_to_relay.terms_message,
         )?
-        .to_event(&self.nostr_keys)?;
+        .to_event(nostr_keys)?;
 
         // Event DM with invoice
         let invoice_event: NostrEvent =
-            EventBuilder::new_encrypted_direct_msg(&self.nostr_keys, pubkey, &invoice_info.bolt11)?
-                .to_event(&self.nostr_keys)?;
+            EventBuilder::new_encrypted_direct_msg(nostr_keys, pubkey, &invoice_info.bolt11)?
+                .to_event(nostr_keys)?;
 
         // Persist DM events to DB
         self.repo.write_event(&message_event.clone().into()).await?;
@@ -242,8 +255,10 @@ impl Payment {
             .create_invoice_record(&key, invoice_info.clone())
             .await?;
 
-        // Admission event invoice and terms to pubkey that is joining
-        self.send_admission_message(pubkey, &invoice_info).await?;
+        if self.settings.pay_to_relay.direct_message {
+            // Admission event invoice and terms to pubkey that is joining
+            self.send_admission_message(pubkey, &invoice_info).await?;
+        }
 
         Ok(invoice_info)
     }
@@ -123,7 +123,7 @@ CREATE TABLE "tag" (
 CREATE INDEX tag_event_id_idx ON tag USING btree (event_id, name);
 CREATE INDEX tag_value_idx ON tag USING btree (value);
 
--- NIP-05 Verfication table
+-- NIP-05 Verification table
 CREATE TABLE "user_verification" (
     id int8 NOT NULL GENERATED BY DEFAULT AS IDENTITY,
     event_id bytea NOT NULL,
@@ -842,7 +842,8 @@ impl NostrRepo for SqliteRepo {
     async fn update_invoice(&self, payment_hash: &str, status: InvoiceStatus) -> Result<String> {
         let mut conn = self.write_pool.get()?;
         let payment_hash = payment_hash.to_owned();
-        let pub_key = tokio::task::spawn_blocking(move || {
+
+        tokio::task::spawn_blocking(move || {
             let tx = conn.transaction()?;
             let pubkey: String;
             {
@@ -884,8 +885,7 @@ impl NostrRepo for SqliteRepo {
             let ok: Result<String> = Ok(pubkey);
             ok
         })
-        .await?;
-        pub_key
+        .await?
     }
 
     /// Get the most recent invoice for a given pubkey
@@ -978,7 +978,7 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<Stri
         return (empty_query, empty_params, None);
     }
 
-    // check if the index needs to be overriden
+    // check if the index needs to be overridden
     let idx_name = override_index(f);
     let idx_stmt = idx_name
         .as_ref()
@@ -1080,18 +1080,18 @@ fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<Stri
                 ks.iter().map(std::string::ToString::to_string).collect();
             kind_clause = format!("AND kind IN ({})", str_kinds.join(", "));
         } else {
-            kind_clause = format!("");
+            kind_clause = String::new();
         };
         if f.since.is_some() {
             since_clause = format!("AND created_at > {}", f.since.unwrap());
         } else {
-            since_clause = format!("");
+            since_clause = String::new();
        };
         // Query for timestamp
         if f.until.is_some() {
             until_clause = format!("AND created_at < {}", f.until.unwrap());
         } else {
-            until_clause = format!("");
+            until_clause = String::new();
         };
 
         let tag_clause = format!(
@@ -827,7 +827,7 @@ pub fn start_server(settings: &Settings, shutdown_rx: MpscReceiver<()>) -> Resul
     info!("listening on: {}", socket_addr);
     // all client-submitted valid events are broadcast to every
     // other client on this channel. This should be large enough
-    // to accomodate slower readers (messages are dropped if
+    // to accommodate slower readers (messages are dropped if
     // clients can not keep up).
     let (bcast_tx, _) = broadcast::channel::<Event>(broadcast_buffer_limit);
     // validated events that need to be persisted are sent to the
@@ -1125,8 +1125,8 @@ async fn nostr_server(
 
     let unspec = "<unspecified>".to_string();
     info!("new client connection (cid: {}, ip: {:?})", cid, conn.ip());
-    let origin = client_info.origin.as_ref().unwrap_or_else(|| &unspec);
-    let user_agent = client_info.user_agent.as_ref().unwrap_or_else(|| &unspec);
+    let origin = client_info.origin.as_ref().unwrap_or(&unspec);
+    let user_agent = client_info.user_agent.as_ref().unwrap_or(&unspec);
     info!(
         "cid: {}, origin: {:?}, user-agent: {:?}",
         cid, origin, user_agent
@@ -1175,14 +1175,12 @@ async fn nostr_server(
                     if query_result.event == "EOSE" {
                         let send_str = format!("[\"EOSE\",\"{subesc}\"]");
                         ws_stream.send(Message::Text(send_str)).await.ok();
-                    } else {
-                        if allowed_to_send(&query_result.event, &conn, &settings) {
-                            metrics.sent_events.with_label_values(&["db"]).inc();
-                            client_received_event_count += 1;
-                            // send a result
-                            let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
-                            ws_stream.send(Message::Text(send_str)).await.ok();
-                        }
+                    } else if allowed_to_send(&query_result.event, &conn, &settings) {
+                        metrics.sent_events.with_label_values(&["db"]).inc();
+                        client_received_event_count += 1;
+                        // send a result
+                        let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
+                        ws_stream.send(Message::Text(send_str)).await.ok();
                     }
                 },
                 // TODO: consider logging the LaggedRecv error
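The hunk above folds an `else { if … }` block into `else if` without changing behavior (the shape clippy's `collapsible_else_if` lint points at). A tiny std-only illustration of the equivalence:

```rust
// Same control flow twice; the second form is what the diff moves to.
fn verbose(eose: bool, allowed: bool) -> &'static str {
    if eose {
        "eose"
    } else {
        if allowed {
            "send"
        } else {
            "skip"
        }
    }
}

fn collapsed(eose: bool, allowed: bool) -> &'static str {
    if eose {
        "eose"
    } else if allowed {
        "send"
    } else {
        "skip"
    }
}

fn main() {
    for eose in [false, true] {
        for allowed in [false, true] {
            assert_eq!(verbose(eose, allowed), collapsed(eose, allowed));
        }
    }
}
```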
@@ -1278,7 +1276,7 @@ async fn nostr_server(
                         // check if the event is too far in the future.
                         } else if e.is_valid_timestamp(settings.options.reject_future_seconds) {
                             // Write this to the database.
-                            let auth_pubkey = conn.auth_pubkey().and_then(|pubkey| hex::decode(&pubkey).ok());
+                            let auth_pubkey = conn.auth_pubkey().and_then(|pubkey| hex::decode(pubkey).ok());
                             let submit_event = SubmittedEvent {
                                 event: e.clone(),
                                 notice_tx: notice_tx.clone(),
@@ -1307,7 +1305,7 @@ async fn nostr_server(
                             error!("AUTH command received, but relay_url is not set in the config file (cid: {})", cid);
                         },
                         Some(relay) => {
-                            match conn.authenticate(&event, &relay) {
+                            match conn.authenticate(&event, relay) {
                                 Ok(_) => {
                                     let pubkey = match conn.auth_pubkey() {
                                         Some(k) => k.chars().take(8).collect(),
@@ -188,7 +188,7 @@ impl<'de> Deserialize<'de> for Subscription {
         D: Deserializer<'de>,
     {
         let mut v: Value = Deserialize::deserialize(deserializer)?;
-        // this shoud be a 3-or-more element array.
+        // this should be a 3-or-more element array.
         // verify the first element is a String, REQ
         // get the subscription from the second element.
         // convert each of the remaining objects into filters
@@ -50,15 +50,15 @@ mod tests {
     #[test]
     fn lower_hex() {
         let hexstr = "abcd0123";
-        assert_eq!(is_lower_hex(hexstr), true);
+        assert!(is_lower_hex(hexstr));
     }
 
     #[test]
     fn nip19() {
         let hexkey = "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d";
         let nip19key = "npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6";
-        assert_eq!(is_nip19(hexkey), false);
-        assert_eq!(is_nip19(nip19key), true);
+        assert!(!is_nip19(hexkey));
+        assert!(is_nip19(nip19key));
     }
 
     #[test]
@@ -334,9 +334,9 @@ mod tests {
             id: "0".to_owned(),
             pubkey: public_key.to_hex(),
             delegated_by: None,
-            created_at: created_at,
-            kind: kind,
-            tags: tags,
+            created_at,
+            kind,
+            tags,
             content: "".to_owned(),
             sig: "0".to_owned(),
             tagidx: None,
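The final hunk switches to struct field init shorthand (`created_at,` instead of `created_at: created_at,`). A std-only illustration with made-up types:

```rust
struct Note {
    kind: u64,
    tags: Vec<Vec<String>>,
    created_at: u64,
}

fn main() {
    let kind = 1;
    let created_at = 0;
    let tags: Vec<Vec<String>> = vec![];
    // Instead of `Note { kind: kind, tags: tags, created_at: created_at }`:
    let note = Note { kind, tags, created_at };
    println!("kind={} tags={} created_at={}", note.kind, note.tags.len(), note.created_at);
}
```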